import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check makes sure we did call the fake head request
            mock_head.assert_called()
    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check makes sure we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_one_file(self):
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)

            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")


@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))

            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")

        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
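
# A minimal usage sketch of `Trie` outside the test harness (illustrative; mirrors the
# splits asserted above, nothing here is part of the original test file):
if __name__ == "__main__":
    demo_trie = Trie()
    demo_trie.add("[CLS]")
    demo_trie.add("extra_id_100")
    # Splits on the longest added tokens, leaving the rest of the text intact:
    print(demo_trie.split("[CLS] This is a extra_id_100"))  # ['[CLS]', ' This is a ', 'extra_id_100']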
"""simple docstring"""
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
_lowercase = logging.get_logger(__name__)
class __a ( __a ):
'''simple docstring'''
def __init__( self , **_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["bs4"] )
super().__init__(**_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> Tuple:
'''simple docstring'''
__lowercase = []
__lowercase = []
__lowercase = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
__lowercase = parent.find_all(child.name , recursive=_lowerCamelCase )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(_lowerCamelCase ) else next(i for i, s in enumerate(_lowerCamelCase , 1 ) if s is child ) )
__lowercase = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath
    def __call__(self, html_strings) -> BatchFeature:
        # Check that strings has a valid type
        valid_strings = False
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must be of type `str` (single example) or `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
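
# A minimal usage sketch (illustrative; requires `bs4` and has to run from outside this
# package since the module uses relative imports):
#
#     from transformers import MarkupLMFeatureExtractor
#
#     feature_extractor = MarkupLMFeatureExtractor()
#     encoding = feature_extractor("<html><body><h1>Title</h1><p>Hello world</p></body></html>")
#     print(encoding["nodes"])   # [['Title', 'Hello world']]
#     print(encoding["xpaths"])  # [['/html/body/h1', '/html/body/p']]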
from torch import nn


class ClassificationHead(nn.Module):
    """Classification head for transformer encoders."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
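
# A short usage sketch (illustrative; the sizes below are made up for the example):
if __name__ == "__main__":
    import torch

    head = ClassificationHead(class_size=5, embed_size=768)
    hidden_state = torch.randn(2, 768)  # (batch, embed_size)
    print(head(hidden_state).shape)  # torch.Size([2, 5])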
import unittest

import numpy as np


def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    """
    Computes the Schur complement of the symmetric block matrix [[A, B], [B^T, C]],
    i.e. S = C - B^T @ A^{-1} @ B. A precomputed (pseudo-)inverse of A can be passed
    via `pseudo_inv`.
    """
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b
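
# A quick numeric sketch (illustrative) of the identity the tests below rely on: for the
# block matrix M = [[A, B], [B^T, C]], det(M) = det(A) * det(S), with S the Schur complement.
if __name__ == "__main__":
    demo_a = np.array([[2.0, 0.0], [0.0, 2.0]])
    demo_b = np.array([[1.0], [0.0]])
    demo_c = np.array([[3.0]])
    demo_s = schur_complement(demo_a, demo_b, demo_c)
    demo_m = np.block([[demo_a, demo_b], [demo_b.T, demo_c]])
    print(np.isclose(np.linalg.det(demo_m), np.linalg.det(demo_a) * np.linalg.det(demo_s)))  # True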
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        # Passing the blocks in the wrong order makes the dimension checks fail.
        with self.assertRaises(ValueError):
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
"""
Build a bare-minimum quantum circuit that starts with a single qubit (by default, in
state 0), runs the experiment 1000 times, and returns the total count of the states
finally observed.
"""
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    # Use Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])

    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
def selection_sort(collection: list) -> list:
    """Sorts `collection` in place with selection sort and returns it."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
from ...configuration_utils import PretrainedConfig


TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/tapas-base-finetuned-sqa""": (
"""https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wtq""": (
"""https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wikisql-supervised""": (
"""https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-tabfact""": (
"""https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"""
),
}
class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
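
# A minimal sketch (illustrative; run from outside the package since this module uses
# relative imports) of instantiating the config, e.g. with a weak-supervision setup:
#
#     from transformers import TapasConfig
#
#     config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)
#     print(config.model_type, config.num_aggregation_labels)  # tapas 4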
import os
import tempfile
import unittest
import uuid
from pathlib import Path

from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available  # `availble` is the upstream spelling


if is_torch_available():
    import torch

if is_soundfile_availble():
    import soundfile as sf

if is_vision_available():
    from PIL import Image


def get_new_path(suffix="") -> str:
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)
        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
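
# A short sketch (illustrative) of the contract asserted above: every agent type exposes
# both a raw view and a string-serializable view of the same object.
if __name__ == "__main__":
    demo = AgentText("Hey!")
    assert demo.to_raw() == demo.to_string() == "Hey!"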
from math import ceil


def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
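
# A quick usage sketch (illustrative): spread 12 attention blocks over 4 devices, then
# validate that the resulting map round-trips through the checker.
if __name__ == "__main__":
    demo_map = get_device_map(n_layers=12, devices=[0, 1, 2, 3])
    print(demo_map)  # {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}
    assert_device_map(demo_map, num_blocks=12)  # raises ValueError on duplicate/missing blocks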
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=a )
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ =field(default='''automatic-speech-recognition''' ,metadata={'''include_in_asdict_even_if_is_default''': True} )
a__ =Features({'''audio''': Audio()} )
a__ =Features({'''transcription''': Value('''string''' )} )
a__ ="audio"
a__ ="transcription"
def __lowerCAmelCase ( self , A ) -> List[str]:
if self.audio_column not in features:
raise ValueError(f'Column {self.audio_column} is not present in features.' )
if not isinstance(features[self.audio_column] , A ):
raise ValueError(f'Column {self.audio_column} is not an Audio type.' )
_UpperCAmelCase : Dict = copy.deepcopy(self )
_UpperCAmelCase : Union[str, Any] = self.input_schema.copy()
_UpperCAmelCase : Dict = features[self.audio_column]
_UpperCAmelCase : Optional[int] = input_schema
return task_template
@property
def __lowerCAmelCase ( self ) -> Dict[str, str]:
return {self.audio_column: "audio", self.transcription_column: "transcription"}
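
# A minimal sketch (illustrative) of aligning the template with a dataset's features:
#
#     from datasets import Audio, Features, Value
#     from datasets.tasks import AutomaticSpeechRecognition
#
#     features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
#     task = AutomaticSpeechRecognition().align_with_features(features)
#     print(task.column_mapping)  # {'audio': 'audio', 'transcription': 'transcription'}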
import unittest
from queue import Empty
from threading import Thread

from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM


@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)
    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)
    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)
    def test_text_streamer_decode_kwargs(self):
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))
    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
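
# A minimal usage sketch of the streaming API exercised above (illustrative; the model id
# is just an example, and this has to run outside this module due to its relative import):
#
#     from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
#
#     tok = AutoTokenizer.from_pretrained("gpt2")
#     lm = AutoModelForCausalLM.from_pretrained("gpt2")
#     inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
#     lm.generate(**inputs, streamer=TextStreamer(tok), max_new_tokens=20)  # prints tokens as generated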
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""),
("""beit""", """BeitFeatureExtractor"""),
("""chinese_clip""", """ChineseCLIPFeatureExtractor"""),
("""clap""", """ClapFeatureExtractor"""),
("""clip""", """CLIPFeatureExtractor"""),
("""clipseg""", """ViTFeatureExtractor"""),
("""conditional_detr""", """ConditionalDetrFeatureExtractor"""),
("""convnext""", """ConvNextFeatureExtractor"""),
("""cvt""", """ConvNextFeatureExtractor"""),
("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""),
("""data2vec-vision""", """BeitFeatureExtractor"""),
("""deformable_detr""", """DeformableDetrFeatureExtractor"""),
("""deit""", """DeiTFeatureExtractor"""),
("""detr""", """DetrFeatureExtractor"""),
("""dinat""", """ViTFeatureExtractor"""),
("""donut-swin""", """DonutFeatureExtractor"""),
("""dpt""", """DPTFeatureExtractor"""),
("""encodec""", """EncodecFeatureExtractor"""),
("""flava""", """FlavaFeatureExtractor"""),
("""glpn""", """GLPNFeatureExtractor"""),
("""groupvit""", """CLIPFeatureExtractor"""),
("""hubert""", """Wav2Vec2FeatureExtractor"""),
("""imagegpt""", """ImageGPTFeatureExtractor"""),
("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""),
("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""),
("""levit""", """LevitFeatureExtractor"""),
("""maskformer""", """MaskFormerFeatureExtractor"""),
("""mctct""", """MCTCTFeatureExtractor"""),
("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""),
("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""),
("""mobilevit""", """MobileViTFeatureExtractor"""),
("""nat""", """ViTFeatureExtractor"""),
("""owlvit""", """OwlViTFeatureExtractor"""),
("""perceiver""", """PerceiverFeatureExtractor"""),
("""poolformer""", """PoolFormerFeatureExtractor"""),
("""regnet""", """ConvNextFeatureExtractor"""),
("""resnet""", """ConvNextFeatureExtractor"""),
("""segformer""", """SegformerFeatureExtractor"""),
("""sew""", """Wav2Vec2FeatureExtractor"""),
("""sew-d""", """Wav2Vec2FeatureExtractor"""),
("""speech_to_text""", """Speech2TextFeatureExtractor"""),
("""speecht5""", """SpeechT5FeatureExtractor"""),
("""swiftformer""", """ViTFeatureExtractor"""),
("""swin""", """ViTFeatureExtractor"""),
("""swinv2""", """ViTFeatureExtractor"""),
("""table-transformer""", """DetrFeatureExtractor"""),
("""timesformer""", """VideoMAEFeatureExtractor"""),
("""tvlt""", """TvltFeatureExtractor"""),
("""unispeech""", """Wav2Vec2FeatureExtractor"""),
("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""),
("""van""", """ConvNextFeatureExtractor"""),
("""videomae""", """VideoMAEFeatureExtractor"""),
("""vilt""", """ViltFeatureExtractor"""),
("""vit""", """ViTFeatureExtractor"""),
("""vit_mae""", """ViTFeatureExtractor"""),
("""vit_msn""", """ViTFeatureExtractor"""),
("""wav2vec2""", """Wav2Vec2FeatureExtractor"""),
("""wav2vec2-conformer""", """Wav2Vec2FeatureExtractor"""),
("""wavlm""", """Wav2Vec2FeatureExtractor"""),
("""whisper""", """WhisperFeatureExtractor"""),
("""xclip""", """CLIPFeatureExtractor"""),
("""yolos""", """YolosFeatureExtractor"""),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download,
        resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info("Could not locate the feature extractor configuration file, will try to use the model config instead.")
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    r"""
    A generic feature extractor class that is instantiated as one of the concrete feature extractor
    classes of the library when created with the `AutoFeatureExtractor.from_pretrained` class method.
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new feature extractor class for a given config class."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
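
# A short usage sketch (illustrative): the auto class resolves the concrete extractor
# class from the checkpoint's config, so the same call works across architectures:
#
#     from transformers import AutoFeatureExtractor
#
#     feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#     print(type(feature_extractor).__name__)  # Wav2Vec2FeatureExtractor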
""" DeiT model configuration"""

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-distilled-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
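
# A minimal sketch (illustrative) of the config plus the ONNX export metadata defined above:
#
#     from transformers.models.deit.configuration_deit import DeiTConfig, DeiTOnnxConfig
#
#     config = DeiTConfig()
#     print(config.image_size, config.patch_size)  # 224 16
#     onnx_config = DeiTOnnxConfig(config)
#     print(dict(onnx_config.inputs))  # {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}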
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch

from parameterized import parameterized

from transformers.testing_utils import (
    CaptureStderr,
    ExtendSysPath,
    TestCasePlus,
    execute_subprocess_async,
    get_gpu_count,
    get_torch_dist_unique_port,
    require_apex,
    require_bitsandbytes,
    require_fairscale,
    require_torch,
    require_torch_gpu,
    require_torch_multi_gpu,
    require_torch_non_multi_gpu,
    slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed


bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
    from run_translation import main  # noqa


set_seed(42)
MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"


@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seq2seq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MBART_TINY,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            predict_with_generate=predict_with_generate,
            do_train=do_train,
            do_eval=do_eval,
            do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist(self):
        self.run_seq2seq_quick()

    # verify that the trainer can handle non-distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_dp(self):
        self.run_seq2seq_quick(distributed=False)

    # verify that the trainer can handle distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_ddp(self):
        self.run_seq2seq_quick(distributed=True)

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False
        )

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False
        )

    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex(self):
        # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
        # program and it breaks other tests that run from the same pytest worker, therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
        # 2nd main() call it botches the future eval.
        #
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
    @parameterized.expand(["base", "low", "high", "mixed"])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }

        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seq2seq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
    @slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2,
            max_len=128,
            model_name=MARIAN_MODEL,
            learning_rate=3e-4,
            num_train_epochs=10,
            distributed=False,
        )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]

        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    @slow
    @require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3e-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,  # force run in a new process
                extra_args_str=extra_args,
                do_eval=False,
                do_predict=False,
                n_gpus_to_use=1,  # to allow deterministic fixed memory usage
            )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)

            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb

        # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb    25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate for differences between gpus let's check
        # that we have at least 120MB in savings
        expected_savings = 120

        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB  {gpu_peak_mem_bnb=}MB  {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")

        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )

        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )

        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )
    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()

        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()

        args_predict = """
            --do_predict
        """.split()

        args = []
        if do_train:
            args += args_train

        if do_eval:
            args += args_eval

        if do_predict:
            args += args_predict

        if predict_with_generate:
            args += "--predict_with_generate".split()

        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()

        if extra_args_str is not None:
            args += extra_args_str.split()

        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()

        return output_dir
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable

import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm

from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule


class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """
    BigBirdForQuestionAnswering with a CLS head on top for predicting the answer category.
    This way we can load its weights with FlaxBigBirdForQuestionAnswering.
    """

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels):
    def cross_entropy(logits, labels, reduction=None):
        """One-hot cross entropy in nats; `reduction` is an optional jnp reducer such as jnp.mean."""
        vocab_size = logits.shape[-1]
        onehot_labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logprobs = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(onehot_labels * logprobs, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooled_labels)
    return (start_loss + end_loss + pooled_loss) / 3
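# Worked micro-example for cross_entropy above (illustrative helper, not part of the
# training script): with logits [[2, 0, 0]] and label 0, the loss is
# -log_softmax(logits)[0, 0] = log(e^2 + 2) - 2 ~= 0.2395.
def _example_cross_entropy():
    logits = jnp.array([[2.0, 0.0, 0.0]])
    labels = jnp.array([0])
    onehot = (labels[..., None] == jnp.arange(logits.shape[-1])[None]).astype("f4")
    loss = -jnp.sum(onehot * jax.nn.log_softmax(logits, axis=-1), axis=-1)
    return loss  # ~= [0.2395]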
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3_000
    save_steps: int = 10_500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20_000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4_096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
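# Usage sketch for get_batched_dataset (the toy dataset is hypothetical; HF
# `datasets` is an assumption here): the generator drops the last partial batch,
# yielding len(dataset) // batch_size dicts per pass.
def _example_get_batched_dataset():
    from datasets import Dataset

    ds = Dataset.from_dict({"input_ids": [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]})
    batches = list(get_batched_dataset(ds, batch_size=2, seed=0))
    assert len(batches) == 2  # the fifth row is dropped
    return batches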
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")
    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
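# Note on the idiom above: jax.pmap replicates train_step across devices, and
# jax.lax.pmean averages loss/grads over the named "batch" axis, so every replica
# applies an identical update. A minimal standalone sketch of the same collective:
def _example_pmean():
    @partial(jax.pmap, axis_name="batch")
    def device_mean(x):
        return jax.lax.pmean(x, axis_name="batch")

    n = jax.device_count()
    per_device = jnp.arange(n, dtype=jnp.float32)  # one scalar per device
    return device_mean(per_device)  # every entry equals mean(0..n-1)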
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size
        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i
    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # apply weight decay to everything except biases and LayerNorm scales
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
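# Sanity-check sketch for the schedule built above: it should rise linearly from
# init_lr to lr over warmup_steps, then decay linearly toward 1e-7.
def _example_schedule_values():
    sched = scheduler_fn(lr=3e-5, init_lr=0.0, warmup_steps=100, num_train_steps=1_000)
    assert float(sched(0)) == 0.0
    assert abs(float(sched(100)) - 3e-5) < 1e-9
    assert float(sched(999)) < 3e-5  # decaying after warmup
    return [float(sched(step)) for step in (0, 50, 100, 500, 999)]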
| 721 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
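# The module above relies on the transformers lazy-import pattern: the real module
# object is replaced in sys.modules by a _LazyModule that imports submodules only on
# first attribute access, keeping `import transformers` cheap. A stripped-down
# sketch of the idea (illustrative, much simpler than the real _LazyModule):
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported symbol to the submodule that defines it
        self._class_to_module = {
            symbol: submodule for submodule, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, name):
        submodule = importlib.import_module("." + self._class_to_module[name], self.__name__)
        return getattr(submodule, name)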
| 575 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 447 |
import os
def solution() -> int:
    """Project Euler 11: greatest product of four adjacent numbers in the 20x20 grid
    (horizontally, vertically, or diagonally)."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
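# The four scans above can be collapsed with direction vectors; an equivalent
# sketch of the same search (right, down, and both diagonals):
def _solution_with_directions(grid):
    best = 0
    for di, dj in ((0, 1), (1, 0), (1, 1), (1, -1)):
        for i in range(20):
            for j in range(20):
                end_i, end_j = i + 3 * di, j + 3 * dj
                if 0 <= end_i < 20 and 0 <= end_j < 20:
                    product = 1
                    for k in range(4):
                        product *= grid[i + k * di][j + k * dj]
                    best = max(best, product)
    return best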
| 300 | 0 |
import warnings
from typing import List, Optional, Tuple, Union

import numpy as np
import PIL
import torch

from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        # binarize the mask
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    """Pipeline for image inpainting with RePaint (https://arxiv.org/abs/2201.09865)."""

    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
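# Usage sketch for the pipeline above. The checkpoint name and image URLs follow the
# diffusers RePaint docs but should be treated as assumptions here; running this
# needs network access (and realistically a GPU).
def _example_repaint_usage():
    from io import BytesIO

    import requests

    def download_image(url):
        response = requests.get(url)
        return PIL.Image.open(BytesIO(response.content)).convert("RGB")

    img_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/repaint/celeba_hq_256.png"
    mask_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/repaint/mask_256.png"
    original_image = download_image(img_url).resize((256, 256))
    mask_image = download_image(mask_url).resize((256, 256))

    scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
    pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)

    output = pipe(
        image=original_image,
        mask_image=mask_image,
        num_inference_steps=250,
        eta=0.0,
        jump_length=10,
        jump_n_sample=10,
    )
    return output.images[0]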
| 712 |
import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
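    # Quick illustration (not part of the upstream test suite) of the
    # custom-timesteps API exercised above: an explicit descending list
    # replaces num_inference_steps entirely.
    def _example_custom_timesteps_usage(self):
        scheduler = DDPMScheduler(num_train_timesteps=1000)
        scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])
        assert scheduler.timesteps.tolist() == [100, 87, 50, 1, 0]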
| 333 | 0 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """Rename map for the patch-embedding weights of stage `idx`."""
    embed = []
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
f"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
f"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
f"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
f"stage{idx}.patch_embed.norm.bias",
) )
return embed
def attention(idx, cnt):
    """Rename map for attention block `cnt` of stage `idx`."""
    attention_weights = []
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
f"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
f"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
f"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
f"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
f"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
f"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
f"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
f"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", f"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", f"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", f"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", f"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", f"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", f"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", f"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", f"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def cls_token(idx):
    """Rename map for the CLS token of stage `idx` (only the last stage has one)."""
    token = []
    token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
    return token
def final():
    """Rename map for the final layernorm and classification head."""
    head = []
    head.append(("layernorm.weight", "norm.weight"))
    head.append(("layernorm.bias", "norm.bias"))
    head.append(("classifier.weight", "head.weight"))
    head.append(("classifier.bias", "head.bias"))
    return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Fetch the original CvT weights, rename them to the Hugging Face layout and save the model."""
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"

    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]

    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]

    # For wide cvt (similar to wide-resnet), depth size 24 (w24 = 2+2+20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=384,
type=int,
help="""Input Image Size""",
)
    parser.add_argument(
        "--cvt_file_name",
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Path to the original CvT checkpoint (.pth file).",
    )
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
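    # Example invocation (file paths are placeholders, not shipped with the repo):
    #
    #   python convert_cvt_original_pytorch_checkpoint_to_pytorch.py \
    #       --cvt_model cvt-w24 \
    #       --image_size 384 \
    #       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
    #       --pytorch_dump_folder_path ./cvt-w24-384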
| 203 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # zero out 0 * log(0) = nan terms
    return -plogp.sum(dim=-1)
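# Worked check for entropy() above (illustrative): a uniform distribution over four
# outcomes has entropy ln(4) ~= 1.3863 nats, while a one-hot distribution has
# entropy 0 (the p == 0 terms are zeroed explicitly instead of producing nan).
def _example_entropy():
    uniform = torch.full((4,), 0.25)
    one_hot = torch.tensor([1.0, 0.0, 0.0, 0.0])
    return entropy(uniform), entropy(one_hot)  # ~= (tensor(1.3863), tensor(0.))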
def print_2d_tensor(tensor):
    """Print a 2D tensor, one row per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores as in Michel et al.
    (http://arxiv.org/abs/1905.10650)."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Iteratively mask the least important heads until the score drops below the threshold."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune (actually remove) the masked heads and measure the effect on score and speed."""
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
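# The dict handed to model.prune_heads above maps layer index -> list of head
# indices to remove. A tiny sketch (hypothetical 2-layer, 4-head mask) of deriving
# it from a 0/1 head mask, mirroring the comprehension in prune_heads():
def _example_heads_to_prune():
    head_mask = torch.ones(2, 4)
    head_mask[0, 1] = 0.0
    head_mask[1, 3] = 0.0
    return {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze(-1).tolist()
        for layer in range(len(head_mask))
    }  # -> {0: [1], 1: [3]}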
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount of heads to mask at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
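    # Example invocation (illustrative; the data file must contain pre-tokenized
    # ids, one sequence per line, loadable by np.loadtxt as in main() above, and
    # the script filename is an assumption):
    #
    #   python run_prune_gpt.py \
    #       --model_name_or_path gpt2 \
    #       --data_dir ./gpt2_token_ids.txt \
    #       --output_dir ./pruned_gpt2 \
    #       --try_masking --masking_threshold 0.9
    #
    # head_mask.npy and the pruned checkpoint are written to --output_dir.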
| 203 | 1 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # collect decodable single ids (skipping raw bytes that are not valid UTF-8 on their own)
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])
    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
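    # Illustrative helper (not an upstream test): ByT5 works on raw UTF-8 bytes with
    # a fixed offset -- ids 0-2 are pad/eos/unk, so byte b maps to id b + 3. That is
    # why "€" (bytes 0xE2 0x82 0xAC) encodes to 229, 133, 175 in the test above.
    def _example_byte_offsets(self):
        assert [b + 3 for b in "€".encode("utf-8")] == [229, 133, 175]
        assert [b + 3 for b in "hi".encode("utf-8")] == [107, 108]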
    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    # There is a conflict between the default value of extra_ids and adding a new special token through
    # additional_special_tokens, so we need to include the extra_ids in that argument as well.
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token", tokenizer_without_change_in_init.get_vocab())  # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == "")
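    # id 255 maps back to byte 0xFC (255 minus the 3-id offset), which is not valid UTF-8 on
    # its own, so decoding drops it and returns the empty string.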
    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    # We need a different implementation of the test of the same name defined in TokenizerTesterMixin because
    # this tokenizer doesn't have a vocabulary
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
| 710 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        TFLayoutLMv3ForQuestionAnswering,
        TFLayoutLMv3ForSequenceClassification,
        TFLayoutLMv3ForTokenClassification,
        TFLayoutLMv3Model,
)
if is_vision_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class TFLayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
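        # e.g. with image_size=4 and patch_size=2 this gives (4 // 2) ** 2 + 1 = 5 image tokens,
        # so seq_length = 7 text tokens + 5 image tokens = 12 for the default tester above.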
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMv3Model(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMv3ForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
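    # The checks above exercise text+image, text-only and image-only inputs; seq_length,
    # text_seq_length and image_seq_length pick the expected hidden-state shape for each case.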
@require_tf
class TFLayoutLMv3ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMv3Model,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMv3ForQuestionAnswering, "feature-extraction": TFLayoutLMv3Model}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = TFLayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())

                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)

                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]

                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]

                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()

        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()

        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()

        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()

        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )

    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()

        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )

        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
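    # The sequence length of 199 above is 2 text tokens + (224 / 16) ** 2 = 196 image patches
    # + 1 CLS token, given the base checkpoint's 224x224 input and 16x16 patches.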
| 68 | 0 |
def is_automorphic_number(number: int) -> bool:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
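# A number is automorphic when its square ends in the number itself: 5 -> 25 and 76 -> 5776
# pass, while 7 -> 49 fails at the last digit.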
if __name__ == "__main__":
import doctest
    doctest.testmod()
 | 6 |
'''simple docstring'''
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
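# Subtracting the per-row max before exponentiating keeps np.exp from overflowing on large
# logits; the shifted softmax is mathematically identical to the unshifted one.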
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `"default"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:

            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
              has several labels, will apply the softmax function on the output.
            - `"sigmoid"`: Applies the sigmoid function on the output.
            - `"softmax"`: Applies the softmax function on the output.
            - `"none"`: Does not apply any function on the output.
    """,
)
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as a default argument because we're going to use `top_k=None` in user code to declare "no top_k"
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs):
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
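# Minimal usage sketch (assuming a standard sentiment checkpoint is available):
#
#   from transformers import pipeline
#   classifier = pipeline("text-classification")
#   classifier("This movie was great!")             # -> [{"label": "POSITIVE", "score": 0.99...}]
#   classifier("This movie was great!", top_k=None) # -> all class scores, no legacy list wrapping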
| 349 | 0 |
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
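# With the pooling table above, args.num_image_embeds = N pools the 7x7 ResNet feature grid
# down to POOLING_BREAKDOWN[N] positions, e.g. N = 3 yields a 3x1 grid of 2048-d embeddings.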
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
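# Sentences are padded to the longest sequence in the batch; mask_tensor marks the real
# tokens so the model's attention can ignore the zero padding.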
def get_mmimdb_labels():
    return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017], std=[0.12221994, 0.12145835, 0.14380469]),
] )
| 459 |
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 459 | 1 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
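# Residues can be stored in a dense 14-atom layout (only the atoms that exist for a given
# residue type) or a sparse 37-atom layout (the union of all heavy-atom types); the tables
# built below convert indices and existence masks between the two representations.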
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )

        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein
def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
 | 25 | 
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
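# Divide and conquer: each call splits [left, right] at the midpoint, so every element is
# compared exactly once and the recursion depth stays at O(log n).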
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 666 | 0 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
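# Note: each "highway" above is an early-exit classifier attached to an intermediate encoder
# layer; at inference time the entropy of a highway's logits can be used to stop early.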
| 481 |
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded"
                    f" string. Received: {config_file_or_dict}"
                )

        self.config = config

        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True
    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload
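    # Example: get_value("zero_optimization.stage") walks the nested dict one dotted segment
    # at a time, so {"zero_optimization": {"stage": 3}} returns 3 and is_zero3() is True.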
class _UpperCAmelCase :
def __init__( self , a__ ):
A_ : Any = engine
def _lowerCamelCase ( self , a__ , **a__ ):
# runs backpropagation and handles mixed precision
self.engine.backward(a__ , **a__ )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class _UpperCAmelCase ( _lowerCamelCase ):
def __init__( self , a__ ):
super().__init__(a__ , device_placement=a__ , scaler=a__ )
A_ : Dict = hasattr(self.optimizer , """overflow""" )
def _lowerCamelCase ( self , a__=None ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def _lowerCamelCase ( self ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def _lowerCamelCase ( self ):
if self.__has_overflow__:
return self.optimizer.overflow
return False
class _UpperCAmelCase ( _lowerCamelCase ):
def __init__( self , a__ , a__ ):
super().__init__(a__ , a__ )
def _lowerCamelCase ( self ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class DummyOptim:
    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
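# Usage sketch (editor's illustration, under the assumption that these are the
# placeholder optimizer/scheduler classes handed to `accelerator.prepare` when
# the real ones come from a DeepSpeed config file):
#
#   optimizer = DummyOptim(params=model.parameters(), lr=3e-4)
#   lr_scheduler = DummyScheduler(optimizer, total_num_steps=1000, warmup_num_steps=100)
#
# Both objects only record their arguments; the actual optimizer and scheduler
# are constructed later from the DeepSpeed config.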
| 481 | 1 |
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """
    Formats a user-agent string with basic info about a request.
    """
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
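# Example (editor's note, not part of the source): called as below, the helper
# appends caller-supplied fields to the base agent string. Values shown are
# illustrative only.
#
#   http_user_agent({"pipeline_class": "StableDiffusionPipeline"})
#   # -> "diffusers/<version>; python/3.10.12; session_id/<hex>; torch/<version>; pipeline_class/StableDiffusionPipeline"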
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """
    Extracts the commit hash from a resolved filename toward a cache file.
    """
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
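# Illustration (added for clarity; the path below is made up): a resolved cache
# file of the form ".../snapshots/<commit>/<file>" yields the captured segment.
#
#   extract_commit_hash(
#       "~/.cache/huggingface/hub/models--foo--bar/snapshots/0123abcd0123abcd0123abcd0123abcd0123abcd/model.bin"
#   )
#   # -> the 40-char hash when it matches REGEX_COMMIT_HASH, else None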
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
            "existing cached models. This is a one-time operation, you can interrupt it or run it "
            "later by calling `diffusers.utils.hub_utils.move_cache()`."
        )
        try:
            move_cache()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
                "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
                "message and we will do our best to help."
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, "w") as f:
            f.write("1")
    except Exception:
        logger.warning(
            f"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
            "the directory exists and can be written to."
        )
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name
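# Example (editor's note): the variant is spliced in just before the file
# extension.
#
#   _add_variant("diffusion_pytorch_model.bin", "fp16")
#   # -> "diffusion_pytorch_model.fp16.bin"
#   _add_variant("model.safetensors", None)
#   # -> "model.safetensors"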
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file

        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
| 289 |
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 332 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class _lowerCAmelCase :
__UpperCAmelCase : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be trained.'''} )
__UpperCAmelCase : Optional[str] = field(
default='''./''' , metadata={'''help''': '''Save dir where model repo is cloned and models updates are saved to.'''} )
__UpperCAmelCase : Optional[str] = field(
default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path of training dataset.'''} )
__UpperCAmelCase : Optional[str] = field(
default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} )
__UpperCAmelCase : Optional[int] = field(default=2 , metadata={'''help''': '''Batch size for training.'''} )
__UpperCAmelCase : Optional[int] = field(default=2 , metadata={'''help''': '''Batch size for evaluation.'''} )
__UpperCAmelCase : Optional[float] = field(default=0.1 , metadata={'''help''': '''Value of weight decay.'''} )
__UpperCAmelCase : Optional[int] = field(
default=10000 , metadata={'''help''': '''Size of buffer used to shuffle streaming dataset.'''} )
__UpperCAmelCase : Optional[float] = field(default=2E-4 , metadata={'''help''': '''Learning rate fo training.'''} )
__UpperCAmelCase : Optional[str] = field(default='''cosine''' , metadata={'''help''': '''Learning rate.'''} )
__UpperCAmelCase : Optional[int] = field(
default=750 , metadata={'''help''': '''Number of warmup steps in the learning rate schedule.'''} )
__UpperCAmelCase : Optional[int] = field(
default=16 , metadata={'''help''': '''Number of gradient accumulation steps.'''} )
__UpperCAmelCase : Optional[bool] = field(
default=snake_case_ , metadata={'''help''': '''Use gradient checkpointing to reduce memory footprint.'''} )
__UpperCAmelCase : Optional[int] = field(default=50000 , metadata={'''help''': '''Maximum number of training steps.'''} )
__UpperCAmelCase : Optional[int] = field(
default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
__UpperCAmelCase : Optional[int] = field(default=1024 , metadata={'''help''': '''Sequence lengths used for training.'''} )
__UpperCAmelCase : Optional[int] = field(default=1 , metadata={'''help''': '''Training seed.'''} )
__UpperCAmelCase : Optional[int] = field(
default=1024 , metadata={'''help''': '''Interval to save checkpoints. Measured as number of forward passes not training steps.'''} , )
__UpperCAmelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''States path if the training should continue from a checkpoint folder.'''} )
__UpperCAmelCase : Optional[bool] = field(default=snake_case_ , metadata={'''help''': '''If True the data is pretokenized.'''} )
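# Usage sketch (editor's addition): dataclasses like the one above are usually
# consumed through transformers' HfArgumentParser in a training script. The
# import and call below reflect that standard pattern, not code from this file.
#
#   from transformers import HfArgumentParser
#
#   parser = HfArgumentParser(TrainingArguments)
#   args = parser.parse_args()  # e.g. --learning_rate 5e-4 --seq_length 1024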
@dataclass
class _lowerCAmelCase :
__UpperCAmelCase : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
__UpperCAmelCase : Optional[str] = field(
default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} )
__UpperCAmelCase : Optional[int] = field(default=2 , metadata={'''help''': '''Batch size used for evaluation.'''} )
__UpperCAmelCase : Optional[int] = field(
default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
__UpperCAmelCase : Optional[int] = field(default=1024 , metadata={'''help''': '''Length of sequences to be evaluated.'''} )
__UpperCAmelCase : Optional[int] = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
@dataclass
class _lowerCAmelCase :
__UpperCAmelCase : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
__UpperCAmelCase : Optional[int] = field(default=snake_case_ , metadata={'''help''': '''Number of workers used for code evaluation.'''} )
__UpperCAmelCase : Optional[int] = field(
default=snake_case_ , metadata={'''help''': '''The number of human-eval tasks to run. If not included all tasks are evaluated.'''} , )
__UpperCAmelCase : Optional[bool] = field(
default=snake_case_ , metadata={'''help''': '''Sample from the language model\'s output distribution.'''} )
__UpperCAmelCase : Optional[float] = field(default=0.2 , metadata={'''help''': '''Sampling temperature used for generation.'''} )
__UpperCAmelCase : Optional[int] = field(default=256 , metadata={'''help''': '''Maximum number of newly generated tokens.'''} )
__UpperCAmelCase : Optional[int] = field(default=0 , metadata={'''help''': '''Top-k parameter used for generation.'''} )
__UpperCAmelCase : Optional[float] = field(default=0.95 , metadata={'''help''': '''Top-p parameter used for nucleus sampling.'''} )
__UpperCAmelCase : Optional[int] = field(default=10 , metadata={'''help''': '''Number of generations to run in parallel.'''} )
__UpperCAmelCase : Optional[int] = field(
default=200 , metadata={'''help''': '''Number of completions to generate for each sample.'''} )
__UpperCAmelCase : Optional[int] = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
__UpperCAmelCase : Optional[str] = field(
default='''eval_results.json''' , metadata={'''help''': '''Name of the output file with the evaluation results.'''} )
__UpperCAmelCase : Optional[str] = field(
default='''0''' , metadata={'''help''': '''Allow `code_eval` to execute Python code on machine'''} )
__UpperCAmelCase : Optional[int] = field(
default=-1 , metadata={
'''help''': (
'''Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'''
''' number corresponds to which GPU device id to run on.'''
)
} , )
@dataclass
class _lowerCAmelCase :
__UpperCAmelCase : Optional[int] = field(
default=snake_case_ , metadata={
'''help''': '''The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'''
} , )
__UpperCAmelCase : Optional[str] = field(
default='''transformersbook/codeparrot''' , metadata={'''help''': '''Folder or name of dataset to process.'''} )
__UpperCAmelCase : Optional[str] = field(
default='''codeparrot-clean''' , metadata={'''help''': '''Folder to save the processed dataset.'''} )
__UpperCAmelCase : Optional[int] = field(
default=100000 , metadata={'''help''': '''Number of files to save per JSON output file.'''} )
__UpperCAmelCase : Optional[str] = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
__UpperCAmelCase : Optional[float] = field(
default=1000 , metadata={'''help''': '''Maximum line length in file, otherwise file is filtered.'''} )
__UpperCAmelCase : Optional[float] = field(
default=100 , metadata={'''help''': '''Maximum mean line length in file, otherwise file is filtered.'''} )
__UpperCAmelCase : Optional[float] = field(
default=0.25 , metadata={'''help''': '''Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'''} )
__UpperCAmelCase : Optional[float] = field(
default=1.5 , metadata={'''help''': '''Minimum character token ratio for the file, otherwise file is filtered.'''} )
__UpperCAmelCase : Optional[float] = field(
default=0.7 , metadata={'''help''': '''Probability for filtering config, test and uncommon files.'''} )
__UpperCAmelCase : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} , )
__UpperCAmelCase : Optional[bool] = field(
default=snake_case_ , metadata={'''help''': '''If True, near-duplicate samples are removed.'''} )
__UpperCAmelCase : Optional[float] = field(
default=0.85 , metadata={'''help''': '''Jaccard threshold for near-duplicate samples.'''} )
@dataclass
class _lowerCAmelCase :
__UpperCAmelCase : Optional[str] = field(
default='''gpt2''' , metadata={'''help''': '''Base tokenizer to build new tokenizer from.'''} )
__UpperCAmelCase : Optional[str] = field(
default='''transformersbook/codeparrot-train''' , metadata={'''help''': '''Dataset to train tokenizer on.'''} )
__UpperCAmelCase : Optional[str] = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
__UpperCAmelCase : Optional[int] = field(default=200000 , metadata={'''help''': '''Number of examples to train tokenizer on.'''} )
__UpperCAmelCase : Optional[int] = field(
default=32768 , metadata={'''help''': '''Vocab size for the new tokenizer.'''} )
__UpperCAmelCase : Optional[str] = field(default='''codeparrot''' , metadata={'''help''': '''Name of new tokenizer.'''} )
__UpperCAmelCase : Optional[bool] = field(default=snake_case_ , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
@dataclass
class _lowerCAmelCase :
__UpperCAmelCase : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} )
__UpperCAmelCase : Optional[str] = field(
default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path to the dataset to pretokenize.'''} )
__UpperCAmelCase : Optional[str] = field(
default='''tokenized-codeparrot-train''' , metadata={'''help''': '''Repo name of the pretokenized data.'''} )
__UpperCAmelCase : Optional[int] = field(default=snake_case_ , metadata={'''help''': '''Number of workers used for code evaluation.'''} )
@dataclass
class _lowerCAmelCase :
__UpperCAmelCase : Optional[str] = field(
default='''gpt2-large''' , metadata={'''help''': '''Configuration to use for model initialization.'''} )
__UpperCAmelCase : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Tokenizer attached to model.'''} )
__UpperCAmelCase : Optional[str] = field(default='''codeparrot''' , metadata={'''help''': '''Name of the created model.'''} )
__UpperCAmelCase : Optional[bool] = field(default=snake_case_ , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
| 117 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a pair of train/validation `DataLoader`s for the GLUE MRPC dataset,
    using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
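# Quick illustration (editor's addition; mirrors how the function is invoked in
# `training_function` below):
#
#   accelerator = Accelerator()
#   train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size=16)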
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator

    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 117 | 1 |
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)


LOGITS_PROCESSOR_INPUTS_DOCSTRING = r'''
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
'''
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers that can be applied during generation with sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsProcessorList(list):
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
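# Usage sketch (editor's addition): processors compose left-to-right inside a
# generation loop. The class names below match the restored names above.
#
#   processors = FlaxLogitsProcessorList(
#       [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=50)]
#   )
#   scores = processors(input_ids, scores, cur_len)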
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
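# Tiny worked example (editor's addition): with scores [[1., 3., 2., 0.]] and
# top_k=2, only indices 1 and 2 keep their logits; the rest become filter_value.
#
#   warper = FlaxTopKLogitsWarper(top_k=2)
#   warper(None, jnp.array([[1.0, 3.0, 2.0, 0.0]]), cur_len=0)
#   # -> [[-inf, 3.0, 2.0, -inf]]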
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores


class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")

        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len: int):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores
"""simple docstring"""
def __init__( self , snake_case_ ) -> Tuple:
_UpperCAmelCase = dict(snake_case_ )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
_UpperCAmelCase = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
_UpperCAmelCase = force_token_array.at[index].set(snake_case_ )
_UpperCAmelCase = jnp.intaa(snake_case_ )
def __call__( self , snake_case_ , snake_case_ , snake_case_ ) -> jnp.ndarray:
def _force_token(snake_case_ ):
_UpperCAmelCase = scores.shape[0]
_UpperCAmelCase = self.force_token_array[generation_idx]
_UpperCAmelCase = jnp.ones_like(snake_case_ , dtype=scores.dtype ) * -float("inf" )
_UpperCAmelCase = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
_UpperCAmelCase = lax.dynamic_update_slice(snake_case_ , snake_case_ , (0, current_token) )
return new_scores
_UpperCAmelCase = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(snake_case_ ) , lambda: scores , ) , )
return scores
class a ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , snake_case_ , snake_case_ , snake_case_ ) -> Optional[int]:
_UpperCAmelCase = generate_config.eos_token_id
_UpperCAmelCase = generate_config.no_timestamps_token_id
_UpperCAmelCase = generate_config.no_timestamps_token_id + 1
_UpperCAmelCase = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(snake_case_ , "max_initial_timestamp_index" ):
_UpperCAmelCase = generate_config.max_initial_timestamp_index
else:
_UpperCAmelCase = model_config.vocab_size
if self.max_initial_timestamp_index is None:
_UpperCAmelCase = model_config.vocab_size
def __call__( self , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
# suppress <|notimestamps|> which is handled by without_timestamps
_UpperCAmelCase = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )
def handle_pairs(snake_case_ , snake_case_ ):
_UpperCAmelCase = jnp.where((cur_len - self.begin_index) >= 1 , snake_case_ , snake_case_ )
_UpperCAmelCase = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , snake_case_ , )
_UpperCAmelCase = jnp.where((cur_len - self.begin_index) < 2 , snake_case_ , snake_case_ )
_UpperCAmelCase = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , snake_case_ , snake_case_ , )
return jnp.where(
snake_case_ , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , snake_case_ , )
_UpperCAmelCase = jax.vmap(snake_case_ )(snake_case_ , snake_case_ )
_UpperCAmelCase = jnp.where(cur_len == self.begin_index , snake_case_ , snake_case_ )
_UpperCAmelCase = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , snake_case_ , )
_UpperCAmelCase = self.timestamp_begin + self.max_initial_timestamp_index
_UpperCAmelCase = jnp.where(
snake_case_ , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , snake_case_ , )
# if sum of probability over timestamps is above any other token, sample timestamp
_UpperCAmelCase = jax.nn.log_softmax(snake_case_ , axis=-1 )
def handle_cumulative_probs(snake_case_ , snake_case_ ):
_UpperCAmelCase = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
_UpperCAmelCase = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , snake_case_ , )
_UpperCAmelCase = jax.vmap(snake_case_ )(snake_case_ , snake_case_ )
return scores
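
# A minimal usage sketch (illustrative; the batch size, vocab size, and token
# ids below are assumptions, not values from this file): before `min_length`
# tokens have been generated, the min-length processor pins the EOS logit to
# -inf so generation cannot stop early.
#
#     processor = FlaxMinLengthLogitsProcessor(min_length=5, eos_token_id=2)
#     scores = processor(jnp.zeros((1, 3), dtype=jnp.int32), jnp.zeros((1, 10)), cur_len=3)
#     assert scores[0, 2] == -float("inf")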
| 426 |
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
SCREAMING_SNAKE_CASE_ = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def A__ ( ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = os.path.dirname(os.path.realpath(A__ ) )
_UpperCAmelCase = os.path.join(A__ , "words.txt" )
_UpperCAmelCase = ""
with open(A__ ) as f:
_UpperCAmelCase = f.readline()
_UpperCAmelCase = [word.strip("\"" ) for word in words.strip("\r\n" ).split("," )]
_UpperCAmelCase = [
word
for word in [sum(ord(A__ ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(A__ )
if __name__ == "__main__":
print(solution())
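
# A minimal sanity check, separate from the solution above: "SKY" maps to
# 19 + 11 + 25 = 55, which is the 10th triangular number (0.5 * 10 * 11), so it
# should be counted as a triangular word.
assert sum(ord(x) - 64 for x in "SKY") == 55 and 55 in TRIANGULAR_NUMBERS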
| 426 | 1 |
"""simple docstring"""
from PIL import Image
def _A ( _a : Image , _a : float ):
"""simple docstring"""
def brightness(_a : int ) -> float:
return 1_2_8 + level + (c - 1_2_8)
if not -2_5_5.0 <= level <= 2_5_5.0:
raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
return img.point(_a )
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change brightness to 100
UpperCAmelCase =change_brightness(img, 100)
brigt_img.save("image_data/lena_brightness.png", format="png")
| 708 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase__ ( self ) -> Dict:
A = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
A = tf.convert_to_tensor(
[[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] ,dtype=tf.intaa ,) # J'aime le camembert !"
A = model(lowerCamelCase_ )["""last_hidden_state"""]
A = tf.TensorShape((1, 1_0, 7_6_8) )
self.assertEqual(output.shape ,lowerCamelCase_ )
# compare the actual values for a slice.
A = tf.convert_to_tensor(
[[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]] ,dtype=tf.floataa ,)
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1E-4 ) )
| 255 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class a ( metaclass=lowerCAmelCase_ ):
_snake_case : Dict = ['keras_nlp']
def __init__( self : str , *__lowerCAmelCase : Optional[int] , **__lowerCAmelCase : Union[str, Any] ):
requires_backends(self , ["""keras_nlp"""] )
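
# A short illustration (assumed behaviour): instantiating the dummy without
# `keras_nlp` installed raises an ImportError via `requires_backends` that
# names the missing backend, e.g.
#
#     TFGPT2Tokenizer()  # ImportError mentioning the keras_nlp requirement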
| 277 |
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class A__:
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]=3 , __SCREAMING_SNAKE_CASE : Union[str, Any]=32 , __SCREAMING_SNAKE_CASE : List[Any]=3 , __SCREAMING_SNAKE_CASE : int=10 , __SCREAMING_SNAKE_CASE : List[Any]=[8, 16, 32, 64] , __SCREAMING_SNAKE_CASE : str=[1, 1, 2, 1] , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : List[str]="relu" , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Dict=["stage2", "stage3", "stage4"] , __SCREAMING_SNAKE_CASE : Optional[Any]=[2, 3, 4] , __SCREAMING_SNAKE_CASE : int=1 , ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = embeddings_size
__SCREAMING_SNAKE_CASE = hidden_sizes
__SCREAMING_SNAKE_CASE = depths
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = scope
__SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = out_features
__SCREAMING_SNAKE_CASE = out_indices
__SCREAMING_SNAKE_CASE = num_groups
def _a ( self : List[Any] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def _a ( self : Any ) -> str:
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def _a ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = BitModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = BitForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = BitBackbone(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = BitBackbone(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _a ( self : int ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs
__SCREAMING_SNAKE_CASE = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A__( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
lowerCAmelCase = (
{'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = BitModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE )
def _a ( self : Optional[int] ) -> int:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self : Any ) -> Optional[int]:
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def _a ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def _a ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
def _a ( self : Optional[int] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def _a ( self : int ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__SCREAMING_SNAKE_CASE )
def _a ( self : int ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(config=__SCREAMING_SNAKE_CASE )
for name, module in model.named_modules():
if isinstance(__SCREAMING_SNAKE_CASE , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def _a ( self : int ) -> Dict:
"""simple docstring"""
def check_hidden_states_output(__SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] ):
__SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__SCREAMING_SNAKE_CASE = self.model_tester.num_stages
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
__SCREAMING_SNAKE_CASE = layer_type
__SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def _a ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def _a ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = BitModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def _a ( ) -> List[Any]:
__SCREAMING_SNAKE_CASE = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class A__( unittest.TestCase ):
@cached_property
def _a ( self : Dict ) -> str:
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def _a ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.default_image_processor
__SCREAMING_SNAKE_CASE = prepare_img()
__SCREAMING_SNAKE_CASE = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
__SCREAMING_SNAKE_CASE = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@require_torch
class A__( __magic_name__ , unittest.TestCase ):
lowerCAmelCase = (BitBackbone,) if is_torch_available() else ()
lowerCAmelCase = BitConfig
lowerCAmelCase = False
def _a ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = BitModelTester(self )
| 482 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
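
# A short illustration (assumed behaviour of the `_LazyModule` pattern above):
# submodules are imported only on first attribute access, so importing the
# package stays cheap until a symbol is actually used, e.g.
#
#     from transformers.models.git import GitConfig  # triggers the lazy import
#     print(GitConfig().model_type)  # "git"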
| 48 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
a_ = logging.get_logger(__name__)
a_ = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Dict = """deberta-v2"""
def __init__(self , lowercase__=12_81_00 , lowercase__=15_36 , lowercase__=24 , lowercase__=24 , lowercase__=61_44 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_12 , lowercase__=0 , lowercase__=0.02 , lowercase__=1e-7 , lowercase__=False , lowercase__=-1 , lowercase__=0 , lowercase__=True , lowercase__=None , lowercase__=0 , lowercase__="gelu" , **lowercase__ , ):
super().__init__(**lowercase__ )
snake_case_ : Union[str, Any] = hidden_size
snake_case_ : str = num_hidden_layers
snake_case_ : Tuple = num_attention_heads
snake_case_ : Dict = intermediate_size
snake_case_ : Optional[int] = hidden_act
snake_case_ : Union[str, Any] = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : List[Any] = max_position_embeddings
snake_case_ : Union[str, Any] = type_vocab_size
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : List[Any] = relative_attention
snake_case_ : Dict = max_relative_positions
snake_case_ : Optional[int] = pad_token_id
snake_case_ : List[str] = position_biased_input
# Backwards compatibility
if type(lowercase__ ) == str:
snake_case_ : Union[str, Any] = [x.strip() for x in pos_att_type.lower().split("""|""" )]
snake_case_ : Optional[int] = pos_att_type
snake_case_ : List[str] = vocab_size
snake_case_ : Tuple = layer_norm_eps
snake_case_ : List[Any] = kwargs.get("""pooler_hidden_size""" , lowercase__ )
snake_case_ : List[str] = pooler_dropout
snake_case_ : int = pooler_hidden_act
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
@property
def __UpperCamelCase (self ):
if self.task == "multiple-choice":
snake_case_ : List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case_ : int = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def __UpperCamelCase (self ):
return 12
def __UpperCamelCase (self , lowercase__ , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = False , lowercase__ = None , lowercase__ = 3 , lowercase__ = 40 , lowercase__ = 40 , lowercase__ = None , ):
snake_case_ : str = super().generate_dummy_inputs(preprocessor=lowercase__ , framework=lowercase__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 48 | 1 |
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear search over array[left:right]; returns the index of target or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted array; returns an index or -1."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over a sorted array; returns an index or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    return -1
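
# A minimal usage sketch (the list and target below are illustrative
# assumptions, not from the original file): both variants should agree with a
# plain index lookup on a small sorted list.
_demo = [1, 3, 5, 7, 11, 13, 17, 19, 23]
assert ite_ternary_search(_demo, 11) == _demo.index(11)
assert rec_ternary_search(0, len(_demo) - 1, _demo, 11) == _demo.index(11)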
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : Tuple = input('Enter numbers separated by comma:\n').strip()
__A : str = [int(item.strip()) for item in user_input.split(',')]
assert collection == sorted(collection), F"List must be ordered.\n{collection}."
__A : Optional[Any] = int(input('Enter the number to be found in the list:\n').strip())
__A : Dict = ite_ternary_search(collection, target)
__A : Optional[Any] = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(F'Iterative search: {target} found at positions: {resulta}')
print(F'Recursive search: {target} found at positions: {resulta}')
else:
print('Not found') | 334 |
from __future__ import annotations


class IIRFilter:
    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )

        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result

        return result
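
# A minimal usage sketch (illustrative; the coefficient values below are
# assumptions chosen only to demonstrate the API, not a designed filter): a
# first-order smoothing filter processing a step input.
if __name__ == "__main__":
    lp = IIRFilter(order=1)
    lp.set_coefficients(a_coeffs=[1.0, -0.9], b_coeffs=[0.05, 0.05])
    smoothed = [lp.process(x) for x in [0.0, 1.0, 1.0, 1.0, 1.0]]
    print(smoothed)  # values ramp toward the input as the filter settles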
| 419 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
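
# A minimal sketch of a concrete subcommand (hypothetical names, illustration
# only). In the transformers CLI, the object passed to `register_subcommand`
# is the sub-parsers action returned by `ArgumentParser.add_subparsers()`, so
# the command attaches its own parser to it:
if __name__ == "__main__":
    class HelloCommand(BaseTransformersCLICommand):
        @staticmethod
        def register_subcommand(parser):
            hello_parser = parser.add_parser("hello", help="Print a greeting.")
            hello_parser.set_defaults(func=lambda args: HelloCommand())

        def run(self):
            print("hello")

    cli = ArgumentParser("demo CLI")
    commands = cli.add_subparsers()
    HelloCommand.register_subcommand(commands)
    args = cli.parse_args(["hello"])
    args.func(args).run()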
| 79 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
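
# A minimal usage sketch (illustrative; this module lives inside a package, so
# in practice the config is imported from `transformers`):
#
#     from transformers import VivitConfig
#     config = VivitConfig(num_frames=16)
#     print(config.model_type, config.num_frames)  # vivit 16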
| 79 | 1 |
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    """Yields (job_title, company_name) pairs scraped from an Indeed search page."""
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
| 351 |
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Counts the simple paths from the top-left to the bottom-right cell,
    moving in the four cardinal directions and avoiding cells marked 1."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
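
# A minimal usage sketch (the 3x3 grid is an illustrative assumption, not from
# the original file): with the centre cell blocked, the open cells form a
# ring, so there are exactly two simple paths from top-left to bottom-right.
_demo_grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
assert depth_first_search(_demo_grid, 0, 0, set()) == 2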
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351 | 1 |
import argparse
import json
import os
from collections import OrderedDict

import numpy as np
import tensorflow as tf
import torch


def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            # Skip optimizer slots; only model weights are converted.
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
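
# A hypothetical invocation (the paths below are placeholders, not from the
# original file):
#
#     python convert_gptsan_tf_checkpoint_to_pytorch.py \
#         --tf_model_dir ./gptsan_tf_checkpoint --output ./gptsan_japanese.pt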
| 607 |
from __future__ import annotations

import unittest

from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel


@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]


def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 607 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCAmelCase__ = {
'configuration_gpt_neo': ['GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoConfig', 'GPTNeoOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoForCausalLM',
'GPTNeoForQuestionAnswering',
'GPTNeoForSequenceClassification',
'GPTNeoForTokenClassification',
'GPTNeoModel',
'GPTNeoPreTrainedModel',
'load_tf_weights_in_gpt_neo',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'FlaxGPTNeoForCausalLM',
'FlaxGPTNeoModel',
'FlaxGPTNeoPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 626 |
import math
import os
from copy import deepcopy

import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed


os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 84 | 0 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class _A :
'''simple docstring'''
__lowerCamelCase : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be trained.'''} )
__lowerCamelCase : Optional[str] = field(
default='''./''' , metadata={'''help''': '''Save dir where model repo is cloned and models updates are saved to.'''} )
__lowerCamelCase : Optional[str] = field(
default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path of training dataset.'''} )
__lowerCamelCase : Optional[str] = field(
default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} )
__lowerCamelCase : Optional[int] = field(default=2 , metadata={'''help''': '''Batch size for training.'''} )
__lowerCamelCase : Optional[int] = field(default=2 , metadata={'''help''': '''Batch size for evaluation.'''} )
__lowerCamelCase : Optional[float] = field(default=0.1 , metadata={'''help''': '''Value of weight decay.'''} )
__lowerCamelCase : Optional[int] = field(
default=1_0_0_0_0 , metadata={'''help''': '''Size of buffer used to shuffle streaming dataset.'''} )
__lowerCamelCase : Optional[float] = field(default=2e-4 , metadata={'''help''': '''Learning rate fo training.'''} )
__lowerCamelCase : Optional[str] = field(default='''cosine''' , metadata={'''help''': '''Learning rate.'''} )
__lowerCamelCase : Optional[int] = field(
default=7_5_0 , metadata={'''help''': '''Number of warmup steps in the learning rate schedule.'''} )
__lowerCamelCase : Optional[int] = field(
default=1_6 , metadata={'''help''': '''Number of gradient accumulation steps.'''} )
__lowerCamelCase : Optional[bool] = field(
default=snake_case , metadata={'''help''': '''Use gradient checkpointing to reduce memory footprint.'''} )
__lowerCamelCase : Optional[int] = field(default=5_0_0_0_0 , metadata={'''help''': '''Maximum number of training steps.'''} )
__lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
__lowerCamelCase : Optional[int] = field(default=1_0_2_4 , metadata={'''help''': '''Sequence lengths used for training.'''} )
__lowerCamelCase : Optional[int] = field(default=1 , metadata={'''help''': '''Training seed.'''} )
__lowerCamelCase : Optional[int] = field(
default=1_0_2_4 , metadata={'''help''': '''Interval to save checkpoints. Measured as number of forward passes not training steps.'''} , )
__lowerCamelCase : Optional[str] = field(
default=snake_case , metadata={'''help''': '''States path if the training should continue from a checkpoint folder.'''} )
__lowerCamelCase : Optional[bool] = field(default=snake_case , metadata={'''help''': '''If True the data is pretokenized.'''} )
@dataclass
class _A :
'''simple docstring'''
__lowerCamelCase : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
__lowerCamelCase : Optional[str] = field(
default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} )
__lowerCamelCase : Optional[int] = field(default=2 , metadata={'''help''': '''Batch size used for evaluation.'''} )
__lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
__lowerCamelCase : Optional[int] = field(default=1_0_2_4 , metadata={'''help''': '''Length of sequences to be evaluated.'''} )
__lowerCamelCase : Optional[int] = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
@dataclass
class _A :
'''simple docstring'''
__lowerCamelCase : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
__lowerCamelCase : Optional[int] = field(default=snake_case , metadata={'''help''': '''Number of workers used for code evaluation.'''} )
__lowerCamelCase : Optional[int] = field(
default=snake_case , metadata={'''help''': '''The number of human-eval tasks to run. If not included all tasks are evaluated.'''} , )
__lowerCamelCase : Optional[bool] = field(
default=snake_case , metadata={'''help''': '''Sample from the language model\'s output distribution.'''} )
__lowerCamelCase : Optional[float] = field(default=0.2 , metadata={'''help''': '''Sampling temperature used for generation.'''} )
__lowerCamelCase : Optional[int] = field(default=2_5_6 , metadata={'''help''': '''Maximum number of newly generated tokens.'''} )
__lowerCamelCase : Optional[int] = field(default=0 , metadata={'''help''': '''Top-k parameter used for generation.'''} )
__lowerCamelCase : Optional[float] = field(default=0.95 , metadata={'''help''': '''Top-p parameter used for nucleus sampling.'''} )
__lowerCamelCase : Optional[int] = field(default=1_0 , metadata={'''help''': '''Number of generations to run in parallel.'''} )
__lowerCamelCase : Optional[int] = field(
default=2_0_0 , metadata={'''help''': '''Number of completions to generate for each sample.'''} )
__lowerCamelCase : Optional[int] = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
__lowerCamelCase : Optional[str] = field(
default='''eval_results.json''' , metadata={'''help''': '''Name of the file in which evaluation results are saved.'''} )
__lowerCamelCase : Optional[str] = field(
default='''0''' , metadata={'''help''': '''Allow `code_eval` to execute Python code on this machine.'''} )
__lowerCamelCase : Optional[int] = field(
default=-1 , metadata={
'''help''': (
'''Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'''
''' number corresponds to which GPU device id to run on.'''
)
} , )
@dataclass
class _A :
'''simple docstring'''
__lowerCamelCase : Optional[int] = field(
default=snake_case , metadata={
'''help''': '''The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'''
} , )
__lowerCamelCase : Optional[str] = field(
default='''transformersbook/codeparrot''' , metadata={'''help''': '''Folder or name of dataset to process.'''} )
__lowerCamelCase : Optional[str] = field(
default='''codeparrot-clean''' , metadata={'''help''': '''Folder to save the processed dataset.'''} )
__lowerCamelCase : Optional[int] = field(
default=1_0_0_0_0_0 , metadata={'''help''': '''Number of files to save per JSON output file.'''} )
__lowerCamelCase : Optional[str] = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
__lowerCamelCase : Optional[float] = field(
default=1_0_0_0 , metadata={'''help''': '''Maximum line length in file, otherwise file is filtered.'''} )
__lowerCamelCase : Optional[float] = field(
default=1_0_0 , metadata={'''help''': '''Maximum mean line length in file, otherwise file is filtered.'''} )
__lowerCamelCase : Optional[float] = field(
default=0.25 , metadata={'''help''': '''Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'''} )
__lowerCamelCase : Optional[float] = field(
default=1.5 , metadata={'''help''': '''Minimum character token ratio for the file, otherwise file is filtered.'''} )
__lowerCamelCase : Optional[float] = field(
default=0.7 , metadata={'''help''': '''Probability for filtering config, test and uncommon files.'''} )
__lowerCamelCase : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} , )
__lowerCamelCase : Optional[bool] = field(
default=snake_case , metadata={'''help''': '''If True, near-duplicate samples are removed.'''} )
__lowerCamelCase : Optional[float] = field(
default=0.85 , metadata={'''help''': '''Jaccard threshold for near-duplicate samples.'''} )
@dataclass
class _A :
'''simple docstring'''
__lowerCamelCase : Optional[str] = field(
default='''gpt2''' , metadata={'''help''': '''Base tokenizer to build new tokenizer from.'''} )
__lowerCamelCase : Optional[str] = field(
default='''transformersbook/codeparrot-train''' , metadata={'''help''': '''Dataset to train tokenizer on.'''} )
__lowerCamelCase : Optional[str] = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
__lowerCamelCase : Optional[int] = field(default=2_0_0_0_0_0 , metadata={'''help''': '''Vocabulary size of the new tokenizer.'''} )
__lowerCamelCase : Optional[int] = field(
default=3_2_7_6_8 , metadata={'''help''': '''Number of examples to train the tokenizer on.'''} )
__lowerCamelCase : Optional[str] = field(default='''codeparrot''' , metadata={'''help''': '''Name of new tokenizer.'''} )
__lowerCamelCase : Optional[bool] = field(default=snake_case , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
@dataclass
class _A :
'''simple docstring'''
__lowerCamelCase : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} )
__lowerCamelCase : Optional[str] = field(
default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path to the dataset to pretokenize.'''} )
__lowerCamelCase : Optional[str] = field(
default='''tokenized-codeparrot-train''' , metadata={'''help''': '''Repo name of the pretokenized data.'''} )
__lowerCamelCase : Optional[int] = field(default=snake_case , metadata={'''help''': '''Number of workers used for pretokenization.'''} )
@dataclass
class _A :
'''simple docstring'''
__lowerCamelCase : Optional[str] = field(
default='''gpt2-large''' , metadata={'''help''': '''Configuration to use for model initialization.'''} )
__lowerCamelCase : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Tokenizer attached to model.'''} )
__lowerCamelCase : Optional[str] = field(default='''codeparrot''' , metadata={'''help''': '''Name of the created model.'''} )
__lowerCamelCase : Optional[bool] = field(default=snake_case , metadata={'''help''': '''Push saved model to the hub.'''} )
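# a minimal sketch of how these argument dataclasses are typically consumed
# (note that every @dataclass above is named _A, so each definition shadows the
# previous one and only the last would be visible at runtime):
#   from transformers import HfArgumentParser
#   args = HfArgumentParser(_A).parse_args_into_dataclasses()[0]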
| 706 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
__lowercase : Dict = logging.getLogger(__name__)
@dataclass
class _A ( TrainingArguments ):
'''simple docstring'''
__lowerCamelCase : Optional[float] = field(
default=0.0 , metadata={'''help''': '''The label smoothing epsilon to apply (if not zero).'''} )
__lowerCamelCase : bool = field(default=snake_case , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
__lowerCamelCase : bool = field(
default=snake_case , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
__lowerCamelCase : bool = field(default=snake_case , metadata={'''help''': '''Whether to use the Adafactor optimizer.'''} )
__lowerCamelCase : Optional[float] = field(
default=snake_case , metadata={'''help''': '''Encoder layer dropout probability. Goes into model.config.'''} )
__lowerCamelCase : Optional[float] = field(
default=snake_case , metadata={'''help''': '''Decoder layer dropout probability. Goes into model.config.'''} )
__lowerCamelCase : Optional[float] = field(default=snake_case , metadata={'''help''': '''Dropout probability. Goes into model.config.'''} )
__lowerCamelCase : Optional[float] = field(
default=snake_case , metadata={'''help''': '''Attention dropout probability. Goes into model.config.'''} )
__lowerCamelCase : Optional[str] = field(
default='''linear''' , metadata={'''help''': f"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} , )
| 315 | 0 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
BLIP_TEST_FILE = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
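# the get_test_info helpers statically parse a modeling test file and map model
# classes, test classes and tester classes onto one another; the dicts in the
# tests below pin the expected mappings for the BERT and BLIP test files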
class A_ ( unittest.TestCase ):
    def test_get_test_to_tester_mapping( self ):
'''simple docstring'''
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE )
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE )
        expected_bert_mapping = {"BertModelTest": "BertModelTester"}
        expected_blip_mapping = {
"BlipModelTest": "BlipModelTester",
"BlipTextImageModelTest": "BlipTextImageModelsModelTester",
"BlipTextModelTest": "BlipTextModelTester",
"BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
"BlipVQAModelTest": "BlipVQAModelTester",
"BlipVisionModelTest": "BlipVisionModelTester",
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping ) , expected_bert_mapping )
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping ) , expected_blip_mapping )
    def test_get_model_to_test_mapping( self ):
'''simple docstring'''
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE )
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE )
        expected_bert_mapping = {
"BertForMaskedLM": ["BertModelTest"],
"BertForMultipleChoice": ["BertModelTest"],
"BertForNextSentencePrediction": ["BertModelTest"],
"BertForPreTraining": ["BertModelTest"],
"BertForQuestionAnswering": ["BertModelTest"],
"BertForSequenceClassification": ["BertModelTest"],
"BertForTokenClassification": ["BertModelTest"],
"BertLMHeadModel": ["BertModelTest"],
"BertModel": ["BertModelTest"],
}
        expected_blip_mapping = {
"BlipForConditionalGeneration": ["BlipTextImageModelTest"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
"BlipForQuestionAnswering": ["BlipVQAModelTest"],
"BlipModel": ["BlipModelTest"],
"BlipTextModel": ["BlipTextModelTest"],
"BlipVisionModel": ["BlipVisionModelTest"],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping ) , expected_bert_mapping )
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping ) , expected_blip_mapping )
    def test_get_model_to_tester_mapping( self ):
'''simple docstring'''
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE )
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE )
        expected_bert_mapping = {
"BertForMaskedLM": ["BertModelTester"],
"BertForMultipleChoice": ["BertModelTester"],
"BertForNextSentencePrediction": ["BertModelTester"],
"BertForPreTraining": ["BertModelTester"],
"BertForQuestionAnswering": ["BertModelTester"],
"BertForSequenceClassification": ["BertModelTester"],
"BertForTokenClassification": ["BertModelTester"],
"BertLMHeadModel": ["BertModelTester"],
"BertModel": ["BertModelTester"],
}
        expected_blip_mapping = {
"BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
"BlipForQuestionAnswering": ["BlipVQAModelTester"],
"BlipModel": ["BlipModelTester"],
"BlipTextModel": ["BlipTextModelTester"],
"BlipVisionModel": ["BlipVisionModelTester"],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping ) , expected_bert_mapping )
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping ) , expected_blip_mapping ) | 46 |
from maths.prime_check import is_prime
def _UpperCAmelCase ( number: int ):
"""simple docstring"""
    if not isinstance(number , int ):
        __lowerCAmelCase = F"Input value of [number={number}] must be an integer"
        raise TypeError(__lowerCAmelCase )
    if is_prime(number ) and is_prime(number + 2 ):
        return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 611 | 0 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def test_send_file_running_as_expected ( file , sock ):
'''simple docstring'''
# ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    data_feed = iter([1, None] )
    file.return_value.__enter__.return_value.read.side_effect = lambda _ : next(data_feed )
# ===== invoke =====
    send_file(filename="""mytext.txt""" , testing=True )
    # ===== verification =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 718 | import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
def snake_case__ ( self : str ):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__snake_case : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
__snake_case : Union[str, Any] = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowerCAmelCase , projection_dim=_lowerCAmelCase , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__snake_case : Tuple = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_lowerCAmelCase , num_layers=1 , )
torch.manual_seed(0 )
__snake_case : Tuple = DDPMScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=10_00 , clip_sample=_lowerCAmelCase , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , )
# regular denoising components
torch.manual_seed(0 )
__snake_case : int = StableUnCLIPImageNormalizer(embedding_dim=_lowerCAmelCase )
__snake_case : Dict = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
__snake_case : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
__snake_case : Optional[int] = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowerCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__snake_case : Tuple = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowerCAmelCase , layers_per_block=1 , upcast_attention=_lowerCAmelCase , use_linear_projection=_lowerCAmelCase , )
torch.manual_seed(0 )
__snake_case : Optional[int] = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.00085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=_lowerCAmelCase , steps_offset=1 , )
torch.manual_seed(0 )
__snake_case : Any = AutoencoderKL()
__snake_case : Dict = {
# prior components
"""prior_tokenizer""": prior_tokenizer,
"""prior_text_encoder""": prior_text_encoder,
"""prior""": prior,
"""prior_scheduler""": prior_scheduler,
# image noising components
"""image_normalizer""": image_normalizer,
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder,
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
}
return components
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any]=0 ):
if str(_lowerCAmelCase ).startswith("""mps""" ):
__snake_case : List[str] = torch.manual_seed(_lowerCAmelCase )
else:
__snake_case : int = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
__snake_case : Optional[int] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""prior_num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def snake_case__ ( self : int ):
__snake_case : Tuple = torch_device == """cpu"""
self._test_attention_slicing_forward_pass(test_max_difference=_lowerCAmelCase )
def snake_case__ ( self : Optional[Any] ):
__snake_case : Union[str, Any] = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=_lowerCAmelCase )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def snake_case__ ( self : int ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self : Union[str, Any] ):
__snake_case : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )
__snake_case : List[str] = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__snake_case : str = torch.Generator(device="""cpu""" ).manual_seed(0 )
__snake_case : Dict = pipe("""anime turle""" , generator=_lowerCAmelCase , output_type="""np""" )
__snake_case : Union[str, Any] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Union[str, Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__snake_case : int = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
__snake_case : Any = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__snake_case : Dict = pipe(
"""anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , )
__snake_case : Optional[int] = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 390 | 0 |
from math import isclose, sqrt
def next_point ( point_x : float , point_y : float , incoming_gradient : float ):
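    # the mirror is the ellipse 4x^2 + y^2 = 100; implicit differentiation gives
    # a tangent slope of -4x/y, so the normal at (x, y) has gradient y / (4x)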
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
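    # with tan(theta) = normal_gradient, sa and ca above are sin(2*theta) and
    # cos(2*theta), so outgoing_gradient is the incoming line reflected about the normal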
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus , point_x ) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
def solution ( first_x_coord : float = 1.4 , first_y_coord : float = -9.6 ):
    num_reflections = 0
    point_x = first_x_coord
    point_y = first_y_coord
    gradient = (10.1 - point_y) / (0.0 - point_x)
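    # the beam escapes once it reaches the gap -0.01 <= x <= 0.01 at the top of the cell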
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x , point_y , gradient = next_point(point_x , point_y , gradient )
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(f'{solution() = }') | 16 |
'''simple docstring'''
def _A ( a : int , b : int ) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
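# worked example: for inputs 25 (0b11001) and 32 (0b100000) both operands are
# zero-filled to six bits (011001 and 100000) and the bitwise XOR yields "0b111001"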
if __name__ == "__main__":
import doctest
doctest.testmod()
| 245 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
A__: List[str] = logging.get_logger(__name__)
class A__ ( PerceiverImageProcessor ):
def __init__( self :Dict , *SCREAMING_SNAKE_CASE :Optional[Any] , **SCREAMING_SNAKE_CASE :List[str] ) -> None:
'''simple docstring'''
warnings.warn(
"""The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use PerceiverImageProcessor instead.""" , SCREAMING_SNAKE_CASE , )
super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
| 700 |
'''simple docstring'''
from functools import lru_cache
def unique_prime_factors ( n : int ) -> set:
    i = 2
    factors = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
            factors.add(i )
if n > 1:
        factors.add(n )
return factors
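# e.g. unique_prime_factors(100) == {2, 5}, since 100 = 2**2 * 5**2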
@lru_cache
def upf_len ( n : int ) -> int:
    return len(unique_prime_factors(n ) )
def equality ( lst : list ) -> bool:
    return len(set(lst ) ) in (0, 1)
def run ( n : int ) -> list:
    base = 2
while True:
# Increment each value of a generated range
        group = [base + i for i in range(n )]
# Run elements through out unique_prime_factors function
# Append our target number to the end.
        checker = [upf_len(x ) for x in group]
        checker.append(n )
# If all numbers in the list are equal, return the group variable.
        if equality(checker ):
return group
# Increment our base variable by 1
base += 1
def solution ( n : int = 4 ) -> int:
    results = run(n )
    return results[0] if len(results ) else None
if __name__ == "__main__":
print(solution())
| 506 | 0 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _a ( SchedulerCommonTest ):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10
def _lowercase ( self ,**_SCREAMING_SNAKE_CASE ) -> int:
_snake_case = {
"num_train_timesteps": 1_100,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
}
config.update(**_SCREAMING_SNAKE_CASE )
return config
def _lowercase ( self ) -> List[Any]:
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE )
def _lowercase ( self ) -> Optional[Any]:
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] ,[0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=_SCREAMING_SNAKE_CASE ,beta_end=_SCREAMING_SNAKE_CASE )
def _lowercase ( self ) -> List[Any]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_SCREAMING_SNAKE_CASE )
def _lowercase ( self ) -> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_SCREAMING_SNAKE_CASE )
def _lowercase ( self ) -> Optional[int]:
_snake_case = self.scheduler_classes[0]
_snake_case = self.get_scheduler_config()
_snake_case = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(self.num_inference_steps )
_snake_case = torch.manual_seed(0 )
_snake_case = self.dummy_model()
_snake_case = self.dummy_sample_deter * scheduler.init_noise_sigma
_snake_case = sample.to(_SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
_snake_case = scheduler.scale_model_input(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_snake_case = model(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_snake_case = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,generator=_SCREAMING_SNAKE_CASE )
_snake_case = output.prev_sample
_snake_case = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
_snake_case = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2
assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3
def _lowercase ( self ) -> List[Any]:
_snake_case = self.scheduler_classes[0]
_snake_case = self.get_scheduler_config(prediction_type="v_prediction" )
_snake_case = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(self.num_inference_steps )
_snake_case = torch.manual_seed(0 )
_snake_case = self.dummy_model()
_snake_case = self.dummy_sample_deter * scheduler.init_noise_sigma
_snake_case = sample.to(_SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
_snake_case = scheduler.scale_model_input(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_snake_case = model(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_snake_case = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,generator=_SCREAMING_SNAKE_CASE )
_snake_case = output.prev_sample
_snake_case = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
_snake_case = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 0.0_0_0_2 ) < 1e-2
assert abs(result_mean.item() - 2.2676e-06 ) < 1e-3
def _lowercase ( self ) -> Optional[Any]:
_snake_case = self.scheduler_classes[0]
_snake_case = self.get_scheduler_config()
_snake_case = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(self.num_inference_steps ,device=_SCREAMING_SNAKE_CASE )
_snake_case = torch.manual_seed(0 )
_snake_case = self.dummy_model()
_snake_case = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_snake_case = sample.to(_SCREAMING_SNAKE_CASE )
for t in scheduler.timesteps:
_snake_case = scheduler.scale_model_input(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_snake_case = model(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_snake_case = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,generator=_SCREAMING_SNAKE_CASE )
_snake_case = output.prev_sample
_snake_case = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
_snake_case = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2
assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3
def _lowercase ( self ) -> Optional[Any]:
_snake_case = self.scheduler_classes[0]
_snake_case = self.get_scheduler_config()
_snake_case = scheduler_class(**_SCREAMING_SNAKE_CASE ,use_karras_sigmas=_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(self.num_inference_steps ,device=_SCREAMING_SNAKE_CASE )
_snake_case = torch.manual_seed(0 )
_snake_case = self.dummy_model()
_snake_case = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_snake_case = sample.to(_SCREAMING_SNAKE_CASE )
for t in scheduler.timesteps:
_snake_case = scheduler.scale_model_input(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_snake_case = model(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_snake_case = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,generator=_SCREAMING_SNAKE_CASE )
_snake_case = output.prev_sample
_snake_case = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
_snake_case = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9 ) < 1e-2
assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3 ) < 1e-3
| 185 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
class _a ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = BartTokenizer
def __init__( self ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE="replace" ,_SCREAMING_SNAKE_CASE="<s>" ,_SCREAMING_SNAKE_CASE="</s>" ,_SCREAMING_SNAKE_CASE="</s>" ,_SCREAMING_SNAKE_CASE="<s>" ,_SCREAMING_SNAKE_CASE="<unk>" ,_SCREAMING_SNAKE_CASE="<pad>" ,_SCREAMING_SNAKE_CASE="<mask>" ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=True ,**_SCREAMING_SNAKE_CASE ,) -> List[Any]:
super().__init__(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,tokenizer_file=_SCREAMING_SNAKE_CASE ,errors=_SCREAMING_SNAKE_CASE ,bos_token=_SCREAMING_SNAKE_CASE ,eos_token=_SCREAMING_SNAKE_CASE ,sep_token=_SCREAMING_SNAKE_CASE ,cls_token=_SCREAMING_SNAKE_CASE ,unk_token=_SCREAMING_SNAKE_CASE ,pad_token=_SCREAMING_SNAKE_CASE ,mask_token=_SCREAMING_SNAKE_CASE ,add_prefix_space=_SCREAMING_SNAKE_CASE ,trim_offsets=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
_snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" ,_SCREAMING_SNAKE_CASE ) != add_prefix_space:
_snake_case = getattr(_SCREAMING_SNAKE_CASE ,pre_tok_state.pop("type" ) )
_snake_case = add_prefix_space
_snake_case = pre_tok_class(**_SCREAMING_SNAKE_CASE )
_snake_case = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_snake_case = "post_processor"
_snake_case = getattr(self.backend_tokenizer ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
if tokenizer_component_instance:
_snake_case = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_snake_case = tuple(state["sep"] )
if "cls" in state:
_snake_case = tuple(state["cls"] )
_snake_case = False
if state.get("add_prefix_space" ,_SCREAMING_SNAKE_CASE ) != add_prefix_space:
_snake_case = add_prefix_space
_snake_case = True
if state.get("trim_offsets" ,_SCREAMING_SNAKE_CASE ) != trim_offsets:
_snake_case = trim_offsets
_snake_case = True
if changes_to_apply:
_snake_case = getattr(_SCREAMING_SNAKE_CASE ,state.pop("type" ) )
_snake_case = component_class(**_SCREAMING_SNAKE_CASE )
setattr(self.backend_tokenizer ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
@property
def _lowercase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ) -> str:
_snake_case = AddedToken(_SCREAMING_SNAKE_CASE ,lstrip=_SCREAMING_SNAKE_CASE ,rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else value
_snake_case = value
def _lowercase ( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> BatchEncoding:
_snake_case = kwargs.get("is_split_into_words" ,_SCREAMING_SNAKE_CASE )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def _lowercase ( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> BatchEncoding:
_snake_case = kwargs.get("is_split_into_words" ,_SCREAMING_SNAKE_CASE )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._encode_plus(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
_snake_case = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE ,name=_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ) -> Tuple:
_snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ) -> List[int]:
_snake_case = [self.sep_token_id]
_snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 185 | 1 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch ( tf_checkpoint_path , config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 534 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester ( unittest.TestCase ):
def __init__( self : Dict , _A : List[str] , _A : Dict=13 , _A : str=30 , _A : Any=2 , _A : Dict=3 , _A : Optional[Any]=True , _A : Tuple=True , _A : List[str]=32 , _A : int=5 , _A : Optional[int]=4 , _A : Dict=37 , _A : Tuple="gelu" , _A : Dict=0.1 , _A : str=0.1 , _A : int=10 , _A : Union[str, Any]=0.0_2 , ) -> int:
"""simple docstring"""
snake_case_ : int = parent
snake_case_ : str = batch_size
snake_case_ : List[str] = image_size
snake_case_ : Tuple = patch_size
snake_case_ : str = num_channels
snake_case_ : List[str] = is_training
snake_case_ : List[str] = use_labels
snake_case_ : str = hidden_size
snake_case_ : Union[str, Any] = num_hidden_layers
snake_case_ : Any = num_attention_heads
snake_case_ : List[Any] = intermediate_size
snake_case_ : Any = hidden_act
snake_case_ : List[Any] = hidden_dropout_prob
snake_case_ : List[Any] = attention_probs_dropout_prob
snake_case_ : Tuple = type_sequence_label_size
snake_case_ : Any = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case_ : Tuple = (image_size // patch_size) ** 2
snake_case_ : List[Any] = num_patches + 1
def UpperCAmelCase_ ( self : Dict ) -> List[str]:
"""simple docstring"""
snake_case_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ : Optional[Any] = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , )
return config, pixel_values
def UpperCAmelCase_ ( self : int , _A : Union[str, Any] , _A : List[Any] ) -> Dict:
"""simple docstring"""
snake_case_ : Union[str, Any] = FlaxViTModel(config=_A )
snake_case_ : Optional[Any] = model(_A )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
snake_case_ : Union[str, Any] = (self.image_size, self.image_size)
snake_case_ : int = (self.patch_size, self.patch_size)
snake_case_ : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def UpperCAmelCase_ ( self : str , _A : Any , _A : List[str] ) -> Dict:
"""simple docstring"""
snake_case_ : Dict = self.type_sequence_label_size
snake_case_ : Any = FlaxViTForImageClassification(config=_A )
snake_case_ : Optional[int] = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case_ : List[str] = 1
snake_case_ : Dict = FlaxViTForImageClassification(_A )
snake_case_ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ : Any = model(_A )
def UpperCAmelCase_ ( self : Any ) -> Any:
"""simple docstring"""
snake_case_ : Tuple = self.prepare_config_and_inputs()
(
(
snake_case_
) ,(
snake_case_
) ,
) : Any = config_and_inputs
snake_case_ : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class SCREAMING_SNAKE_CASE_ ( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def UpperCAmelCase_ ( self : List[str] ) -> None:
"""simple docstring"""
snake_case_ : Union[str, Any] = FlaxViTModelTester(self )
snake_case_ : Any = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def UpperCAmelCase_ ( self : Any ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
def UpperCAmelCase_ ( self : int ) -> List[str]:
"""simple docstring"""
snake_case_ ,snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : str = model_class(_A )
snake_case_ : Dict = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : Optional[Any] = [*signature.parameters.keys()]
snake_case_ : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , _A )
def UpperCAmelCase_ ( self : Optional[Any] ) -> str:
"""simple docstring"""
snake_case_ ,snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
snake_case_ : Dict = self._prepare_for_class(_A , _A )
snake_case_ : Tuple = model_class(_A )
@jax.jit
def model_jitted(_A : Optional[int] , **_A : Any ):
return model(pixel_values=_A , **_A )
with self.subTest('JIT Enabled' ):
snake_case_ : Optional[Any] = model_jitted(**_A ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
snake_case_ : Optional[Any] = model_jitted(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) )
for jitted_output, output in zip(_A , _A ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def UpperCAmelCase_ ( self : List[str] ) -> Any:
"""simple docstring"""
for model_class_name in self.all_model_classes:
snake_case_ : List[Any] = model_class_name.from_pretrained('google/vit-base-patch16-224' )
snake_case_ : Optional[Any] = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(_A )
| 534 | 1 |
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative ( img ):
# getting number of pixels in the image
    height , width = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
    for i in range(height ):
        for j in range(width ):
            img[i][j] = [255, 255, 255] - img[i][j]
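    # the subtraction relies on numpy broadcasting: imread returns an ndarray,
    # so [255, 255, 255] - pixel inverts each colour channel at once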
return img
if __name__ == "__main__":
# read original image
_lowerCamelCase = imread('image_data/lena.jpg', 1)
# convert to its negative
_lowerCamelCase = convert_to_negative(img)
# show result image
imshow('negative of original image', img)
waitKey(0)
destroyAllWindows()
| 144 |
def merge_sort ( collection ):
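    # repeatedly strip the smallest and largest remaining values; the minima
    # build the front of the result and the (reversed) maxima build the back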
    start , end = [], []
    while len(collection ) > 1:
        mini , maxi = min(collection ), max(collection )
        start.append(mini )
        end.append(maxi )
        collection.remove(mini )
        collection.remove(maxi )
end.reverse()
return start + collection + end
if __name__ == "__main__":
_lowerCamelCase = input('Enter numbers separated by a comma:\n').strip()
_lowerCamelCase = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
| 144 | 1 |
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class __A (TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
super().setUp()
        vocab_tokens = [
"<d>",
"</d>",
"<s>",
"</s>",
"</_>",
"<unk>",
"<pad>",
"</n>",
"我",
"是",
"C",
"P",
"M",
"A",
"n",
"t",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
@tooslow
    def test_pre_tokenization( self ):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b" )
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts )
        self.assertListEqual(tokens , jieba_tokens )
        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 98_02, 1_49_62, 20_82, 8_31, 2_44]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_jieba_tokens )
        reconstructed_text = tokenizer.decode(input_jieba_tokens )
        self.assertEqual(reconstructed_text , normalized_text )
| 10 | '''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main( ) -> Dict:
    """simple docstring"""
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args )
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e ).split(" " )[:-1] )
        full_error_msg = ""
        depreciated_args = eval(str(e ).split(" " )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
        raise ValueError(full_error_msg )
benchmark.run()
if __name__ == "__main__":
main()
| 10 | 1 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] )-> int:
_snake_case : List[str] = OrderedDict()
for key, value in state_dict.items():
if key.startswith('module.encoder' ):
_snake_case : int = key.replace('module.encoder' , 'glpn.encoder' )
if key.startswith('module.decoder' ):
_snake_case : Dict = key.replace('module.decoder' , 'decoder.stages' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
_snake_case : Tuple = key[key.find('patch_embed' ) + len('patch_embed' )]
_snake_case : Any = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(lowerCAmelCase )-1}""" )
if "norm" in key:
_snake_case : Optional[Any] = key.replace('norm' , 'layer_norm' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
_snake_case : Optional[int] = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )]
_snake_case : List[str] = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(lowerCAmelCase )-1}""" )
if "layer_norm1" in key:
_snake_case : str = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
_snake_case : Dict = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
_snake_case : List[Any] = key[key.find('block' ) + len('block' )]
_snake_case : Tuple = key.replace(F"""block{idx}""" , F"""block.{int(lowerCAmelCase )-1}""" )
if "attn.q" in key:
_snake_case : str = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
_snake_case : Dict = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
_snake_case : str = key.replace('attn' , 'attention.self' )
if "fc1" in key:
_snake_case : Optional[int] = key.replace('fc1' , 'dense1' )
if "fc2" in key:
_snake_case : str = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
_snake_case : str = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
_snake_case : List[str] = key.replace('linear_fuse.conv' , 'linear_fuse' )
_snake_case : str = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
_snake_case : int = key[key.find('linear_c' ) + len('linear_c' )]
_snake_case : Optional[Any] = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(lowerCAmelCase )-1}""" )
if "bot_conv" in key:
_snake_case : Optional[Any] = key.replace('bot_conv' , '0.convolution' )
if "skip_conv1" in key:
_snake_case : Union[str, Any] = key.replace('skip_conv1' , '1.convolution' )
if "skip_conv2" in key:
_snake_case : List[Any] = key.replace('skip_conv2' , '2.convolution' )
if "fusion1" in key:
_snake_case : str = key.replace('fusion1' , '1.fusion' )
if "fusion2" in key:
_snake_case : Dict = key.replace('fusion2' , '2.fusion' )
if "fusion3" in key:
_snake_case : Dict = key.replace('fusion3' , '3.fusion' )
if "fusion" in key and "conv" in key:
_snake_case : str = key.replace('conv' , 'convolutional_layer' )
if key.startswith('module.last_layer_depth' ):
_snake_case : Union[str, Any] = key.replace('module.last_layer_depth' , 'head.head' )
_snake_case : Optional[int] = value
return new_state_dict
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: Optional[Any] )-> Dict:
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
_snake_case : Union[str, Any] = state_dict.pop(F"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" )
_snake_case : Optional[Any] = state_dict.pop(F"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
_snake_case : Any = kv_weight[
: config.hidden_sizes[i], :
]
_snake_case : Dict = kv_bias[: config.hidden_sizes[i]]
_snake_case : List[Any] = kv_weight[
config.hidden_sizes[i] :, :
]
_snake_case : List[str] = kv_bias[config.hidden_sizes[i] :]
def lowerCamelCase_ ( )-> Tuple:
_snake_case : Optional[int] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_snake_case : Optional[Any] = Image.open(requests.get(lowerCAmelCase , stream=lowerCAmelCase ).raw )
return image
@torch.no_grad()
def lowerCamelCase_ ( lowerCAmelCase: str , lowerCAmelCase: int , lowerCAmelCase: Dict=False , lowerCAmelCase: Dict=None )-> Optional[Any]:
_snake_case : Optional[Any] = GLPNConfig(hidden_sizes=[64, 1_28, 3_20, 5_12] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
_snake_case : Dict = GLPNImageProcessor()
# prepare image
_snake_case : str = prepare_img()
_snake_case : Optional[Any] = image_processor(images=lowerCAmelCase , return_tensors='pt' ).pixel_values
logger.info('Converting model...' )
# load original state dict
_snake_case : Dict = torch.load(lowerCAmelCase , map_location=torch.device('cpu' ) )
# rename keys
_snake_case : Union[str, Any] = rename_keys(lowerCAmelCase )
# key and value matrices need special treatment
read_in_k_v(lowerCAmelCase , lowerCAmelCase )
# create HuggingFace model and load state dict
_snake_case : List[Any] = GLPNForDepthEstimation(lowerCAmelCase )
model.load_state_dict(lowerCAmelCase )
model.eval()
# forward pass
_snake_case : Dict = model(lowerCAmelCase )
_snake_case : str = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
_snake_case : List[str] = torch.tensor(
[[4.4_1_4_7, 4.0_8_7_3, 4.0_6_7_3], [3.7_8_9_0, 3.2_8_8_1, 3.1_5_2_5], [3.7_6_7_4, 3.5_4_2_3, 3.4_9_1_3]] )
elif "kitti" in model_name:
_snake_case : Optional[Any] = torch.tensor(
[[3.4_2_9_1, 2.7_8_6_5, 2.5_1_5_1], [3.2_8_4_1, 2.7_0_2_1, 2.3_5_0_2], [3.1_1_4_7, 2.4_6_2_5, 2.2_4_8_1]] )
else:
raise ValueError(F"""Unknown model name: {model_name}""" )
_snake_case : Tuple = torch.Size([1, 4_80, 6_40] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , lowerCAmelCase , atol=1E-4 )
print('Looks ok!' )
# finally, push to hub if required
if push_to_hub:
logger.info('Pushing model and image processor to the hub...' )
model.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase , lowerCAmelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=lowerCAmelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase , lowerCAmelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=lowerCAmelCase , )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
lowerCAmelCase_ = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 411 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , UpperCamelCase : Any , UpperCamelCase : Optional[int]=2 , UpperCamelCase : str=True , UpperCamelCase : List[str]=False , UpperCamelCase : Tuple=10 , UpperCamelCase : Union[str, Any]=3 , UpperCamelCase : Tuple=32 * 4 , UpperCamelCase : Tuple=32 * 6 , UpperCamelCase : Optional[Any]=4 , UpperCamelCase : List[Any]=32 , ):
'''simple docstring'''
_snake_case : List[Any] = parent
_snake_case : Optional[Any] = batch_size
_snake_case : List[str] = is_training
_snake_case : Optional[int] = use_auxiliary_loss
_snake_case : Optional[Any] = num_queries
_snake_case : Any = num_channels
_snake_case : Union[str, Any] = min_size
_snake_case : Dict = max_size
_snake_case : str = num_labels
_snake_case : List[Any] = mask_feature_size
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : str = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
UpperCamelCase )
_snake_case : Any = torch.ones([self.batch_size, self.min_size, self.max_size] , device=UpperCamelCase )
_snake_case : List[str] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=UpperCamelCase ) > 0.5
).float()
_snake_case : Any = (torch.rand((self.batch_size, self.num_labels) , device=UpperCamelCase ) > 0.5).long()
_snake_case : List[Any] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case : Tuple = self.prepare_config_and_inputs()
_snake_case : Optional[Any] = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : str , UpperCamelCase : Any ):
'''simple docstring'''
_snake_case : int = output.encoder_hidden_states
_snake_case : Tuple = output.pixel_decoder_hidden_states
_snake_case : Tuple = output.transformer_decoder_hidden_states
# assertEqual, not assertTrue: the second argument to assertTrue is only a failure message
self.parent.assertEqual(len(UpperCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertEqual(len(UpperCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertEqual(len(UpperCamelCase ) , config.decoder_config.decoder_layers )
def UpperCamelCase_ ( self : Any , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : str=False ):
'''simple docstring'''
with torch.no_grad():
_snake_case : str = MaskFormerModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_snake_case : Tuple = model(pixel_values=UpperCamelCase , pixel_mask=UpperCamelCase )
_snake_case : str = model(UpperCamelCase , output_hidden_states=UpperCamelCase )
# the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Dict , UpperCamelCase : Tuple ):
'''simple docstring'''
_snake_case : str = MaskFormerForInstanceSegmentation(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
def comm_check_on_output(UpperCamelCase : Optional[Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_snake_case : Tuple = model(pixel_values=UpperCamelCase , pixel_mask=UpperCamelCase )
_snake_case : Optional[Any] = model(UpperCamelCase )
comm_check_on_output(UpperCamelCase )
_snake_case : Union[str, Any] = model(
pixel_values=UpperCamelCase , pixel_mask=UpperCamelCase , mask_labels=UpperCamelCase , class_labels=UpperCamelCase )
comm_check_on_output(UpperCamelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class _lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Optional[Any] =(MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
a_ : Tuple =(
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
a_ : Any =False
a_ : List[str] =False
a_ : List[str] =False
a_ : Optional[Any] =False
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = MaskFormerModelTester(self )
_snake_case : Optional[int] = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCamelCase , **UpperCamelCase , output_hidden_states=UpperCamelCase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*UpperCamelCase )
@unittest.skip(reason='MaskFormer does not use inputs_embeds' )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason='MaskFormer does not have a get_input_embeddings method' )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason='MaskFormer is not a generative model' )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
pass
@unittest.skip(reason='MaskFormer does not use token embeddings' )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Optional[int] = model_class(UpperCamelCase )
_snake_case : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : Optional[Any] = [*signature.parameters.keys()]
_snake_case : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCamelCase )
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
for model_name in ["facebook/maskformer-swin-small-coco"]:
_snake_case : int = MaskFormerModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Any = (self.model_tester.min_size,) * 2
_snake_case : Optional[int] = {
'pixel_values': torch.randn((2, 3, *size) , device=UpperCamelCase ),
'mask_labels': torch.randn((2, 10, *size) , device=UpperCamelCase ),
'class_labels': torch.zeros(2 , 10 , device=UpperCamelCase ).long(),
}
_snake_case : Optional[int] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(UpperCamelCase )
_snake_case : Any = model(**UpperCamelCase )
self.assertTrue(outputs.loss is not None )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case , _snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCamelCase , **UpperCamelCase , output_hidden_states=UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Union[str, Any] = model_class(UpperCamelCase ).to(UpperCamelCase )
_snake_case : Dict = model(**UpperCamelCase , output_attentions=UpperCamelCase )
self.assertTrue(outputs.attentions is not None )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_snake_case : Dict = self.all_model_classes[1]
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
_snake_case : int = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.train()
_snake_case : Optional[Any] = model(UpperCamelCase , mask_labels=UpperCamelCase , class_labels=UpperCamelCase ).loss
loss.backward()
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = self.all_model_classes[1]
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs()
_snake_case : List[str] = True
_snake_case : List[Any] = True
_snake_case : List[Any] = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.train()
_snake_case : Dict = model(UpperCamelCase , mask_labels=UpperCamelCase , class_labels=UpperCamelCase )
_snake_case : List[str] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_snake_case : Any = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
_snake_case : List[str] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_snake_case : List[str] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=UpperCamelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCAmelCase_ = 1E-4
def lowerCamelCase_ ( )-> List[Any]:
_snake_case : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
return (
MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco' )
if is_vision_available()
else None
)
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco' ).to(UpperCamelCase )
_snake_case : Dict = self.default_image_processor
_snake_case : Tuple = prepare_img()
_snake_case : str = image_processor(UpperCamelCase , return_tensors='pt' ).to(UpperCamelCase )
_snake_case : int = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCamelCase , (1, 3, 8_00, 10_88) )
with torch.no_grad():
_snake_case : Union[str, Any] = model(**UpperCamelCase )
_snake_case : Tuple = torch.tensor(
[[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(UpperCamelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase , atol=UpperCamelCase ) )
_snake_case : Optional[int] = torch.tensor(
[[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(UpperCamelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase , atol=UpperCamelCase ) )
_snake_case : Optional[int] = torch.tensor(
[[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(UpperCamelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCamelCase , atol=UpperCamelCase ) )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(UpperCamelCase )
.eval()
)
_snake_case : Any = self.default_image_processor
_snake_case : List[Any] = prepare_img()
_snake_case : List[str] = image_processor(UpperCamelCase , return_tensors='pt' ).to(UpperCamelCase )
_snake_case : List[Any] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCamelCase , (1, 3, 8_00, 10_88) )
with torch.no_grad():
_snake_case : Optional[Any] = model(**UpperCamelCase )
# masks_queries_logits
_snake_case : Optional[int] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_snake_case : Any = [
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
_snake_case : str = torch.tensor(UpperCamelCase ).to(UpperCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCamelCase , atol=UpperCamelCase ) )
# class_queries_logits
_snake_case : List[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_snake_case : Tuple = torch.tensor(
[
[1.6_5_1_2e0_0, -5.2_5_7_2e0_0, -3.3_5_1_9e0_0],
[3.6_1_6_9e-0_2, -5.9_0_2_5e0_0, -2.9_3_1_3e0_0],
[1.0_7_6_6e-0_4, -7.7_6_3_0e0_0, -5.1_2_6_3e0_0],
] ).to(UpperCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase , atol=UpperCamelCase ) )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : Tuple = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff' )
.to(UpperCamelCase )
.eval()
)
_snake_case : int = self.default_image_processor
_snake_case : Optional[int] = prepare_img()
_snake_case : int = image_processor(UpperCamelCase , return_tensors='pt' ).to(UpperCamelCase )
_snake_case : Optional[Any] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCamelCase , (1, 3, 8_00, 10_88) )
with torch.no_grad():
_snake_case : List[Any] = model(**UpperCamelCase )
# masks_queries_logits
_snake_case : Union[str, Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_snake_case : List[Any] = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
_snake_case : Union[str, Any] = torch.tensor(UpperCamelCase ).to(UpperCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCamelCase , atol=UpperCamelCase ) )
# class_queries_logits
_snake_case : Union[str, Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_snake_case : Union[str, Any] = torch.tensor(
[[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(UpperCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase , atol=UpperCamelCase ) )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Any = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(UpperCamelCase )
.eval()
)
_snake_case : Optional[int] = self.default_image_processor
_snake_case : Dict = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors='pt' , )
_snake_case : List[str] = inputs['pixel_values'].to(UpperCamelCase )
_snake_case : Tuple = [el.to(UpperCamelCase ) for el in inputs['mask_labels']]
_snake_case : Optional[int] = [el.to(UpperCamelCase ) for el in inputs['class_labels']]
with torch.no_grad():
_snake_case : List[str] = model(**UpperCamelCase )
self.assertTrue(outputs.loss is not None )
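# --- Added illustration (not from the test above): the gradient-retention test in
# this snippet relies on Tensor.retain_grad(), which keeps gradients on non-leaf
# tensors so a test can assert that backpropagation reaches intermediate
# activations. Minimal sketch with a toy two-layer model:
import torch

x = torch.randn(4, 8)
hidden = torch.nn.Linear(8, 8)(x)
hidden.retain_grad()  # without this, hidden.grad stays None after backward()
out = torch.nn.Linear(8, 1)(hidden)
out.sum().backward()
assert hidden.grad is not None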
| 411 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCAmelCase ( lowercase__ , unittest.TestCase ):
__A : Dict = BioGptTokenizer
__A : Dict = False
def UpperCAmelCase_ ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase_ = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
lowerCAmelCase_ = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
lowerCAmelCase_ = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(_lowerCamelCase ) )
def UpperCAmelCase_ ( self , _lowerCamelCase ):
lowerCAmelCase_ = "lower newer"
lowerCAmelCase_ = "lower newer"
return input_text, output_text
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = BioGptTokenizer(self.vocab_file , self.merges_file )
lowerCAmelCase_ = "lower"
lowerCAmelCase_ = ["low", "er</w>"]
lowerCAmelCase_ = tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
lowerCAmelCase_ = tokens + ["<unk>"]
lowerCAmelCase_ = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase )
@slow
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
lowerCAmelCase_ = tokenizer.encode('''sequence builders''' , add_special_tokens=_lowerCamelCase )
lowerCAmelCase_ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_lowerCamelCase )
lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase )
lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
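# --- Added worked example (derivable from the fixtures above): with the merge
# rules "l o", "lo w" and "e r</w>", BPE builds "lower" bottom-up from characters:
#     l o w e r</w>  ->  lo w e r</w>  ->  low e r</w>  ->  low er</w>
# so tokenize("lower") == ["low", "er</w>"]. In the vocabulary list above, "low"
# sits at index 14, "er</w>" at index 15 and "<unk>" at index 20, which is exactly
# the id sequence [14, 15, 20] asserted in the test.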
| 704 | '''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
A_ : Union[str, Any] =logging.getLogger(__name__)
def snake_case_ ( __snake_case : str , __snake_case : List[str]) -> str:
if metric == "rouge2":
lowerCAmelCase_ = '''{val_avg_rouge2:.4f}-{step_count}'''
elif metric == "bleu":
lowerCAmelCase_ = '''{val_avg_bleu:.4f}-{step_count}'''
elif metric == "em":
lowerCAmelCase_ = '''{val_avg_em:.4f}-{step_count}'''
elif metric == "loss":
lowerCAmelCase_ = '''{val_avg_loss:.4f}-{step_count}'''
else:
raise NotImplementedError(
F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
''' function.''')
lowerCAmelCase_ = ModelCheckpoint(
dirpath=__snake_case , filename=__snake_case , monitor=F'''val_{metric}''' , mode='''max''' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def snake_case_ ( __snake_case : Optional[int] , __snake_case : Union[str, Any]) -> List[Any]:
return EarlyStopping(
monitor=F'''val_{metric}''' , mode='''min''' if '''loss''' in metric else '''max''' , patience=__snake_case , verbose=__snake_case , )
class __UpperCAmelCase ( pl.Callback ):
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ):
lowerCAmelCase_ = {F'''lr_group_{i}''': param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_lowerCamelCase )
@rank_zero_only
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True ):
logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
lowerCAmelCase_ = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
# Log results
lowerCAmelCase_ = Path(pl_module.hparams.output_dir )
if type_path == "test":
lowerCAmelCase_ = od / '''test_results.txt'''
lowerCAmelCase_ = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
lowerCAmelCase_ = od / F'''{type_path}_results/{trainer.global_step:05d}.txt'''
lowerCAmelCase_ = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=_lowerCamelCase )
generations_file.parent.mkdir(exist_ok=_lowerCamelCase )
with open(_lowerCamelCase , '''a+''' ) as writer:
for key in sorted(_lowerCamelCase ):
if key in ["log", "progress_bar", "preds"]:
continue
lowerCAmelCase_ = metrics[key]
if isinstance(_lowerCamelCase , torch.Tensor ):
lowerCAmelCase_ = val.item()
lowerCAmelCase_ = F'''{key}: {val:.6f}\n'''
writer.write(_lowerCamelCase )
if not save_generations:
return
if "preds" in metrics:
lowerCAmelCase_ = '''\n'''.join(metrics['''preds'''] )
generations_file.open('''w+''' ).write(_lowerCamelCase )
@rank_zero_only
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ):
try:
lowerCAmelCase_ = pl_module.model.model.num_parameters()
except AttributeError:
lowerCAmelCase_ = pl_module.model.num_parameters()
lowerCAmelCase_ = count_trainable_parameters(_lowerCamelCase )
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1E6, '''grad_mp''': n_trainable_pars / 1E6} )
@rank_zero_only
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_lowerCamelCase , _lowerCamelCase , '''test''' )
@rank_zero_only
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
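# --- Added usage sketch (assumed, not from the original file): a concrete
# instantiation of what the two callback factories above produce for
# metric="rouge2". The dirpath and patience values are placeholders.
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint

checkpoint_cb = ModelCheckpoint(
    dirpath="./output",
    filename="{val_avg_rouge2:.4f}-{step_count}",
    monitor="val_rouge2",
    mode="max",
    save_top_k=1,
    every_n_epochs=1,
)
# "loss" metrics are minimized; everything else (rouge2 / bleu / em) is maximized
early_stop_cb = EarlyStopping(monitor="val_rouge2", mode="max", patience=3, verbose=True)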
| 606 | 0 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase ( self : Union[str, Any] ):
snake_case__ : Dict = inspect.getfile(accelerate.test_utils )
snake_case__ : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
snake_case__ : List[Any] = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_distributed_data_loop.py"""] )
snake_case__ : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_ops.py"""] )
@require_multi_gpu
def lowerCamelCase ( self : Optional[Any] ):
print(f"Found {torch.cuda.device_count()} devices." )
snake_case__ : List[Any] = ["""torchrun""", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
@require_multi_gpu
def lowerCamelCase ( self : Dict ):
print(f"Found {torch.cuda.device_count()} devices." )
snake_case__ : List[Any] = ["""torchrun""", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
print(f"Command: {cmd}" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
@require_multi_gpu
def lowerCamelCase ( self : int ):
snake_case__ : Optional[Any] = ["""torchrun""", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
@require_multi_gpu
def lowerCamelCase ( self : Optional[int] ):
print(f"Found {torch.cuda.device_count()} devices, using 2 devices only" )
snake_case__ : Tuple = ["""torchrun""", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="""0,1""" ):
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensora = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
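# --- Added illustration (not part of the check above): what pad_across_processes
# does in the 2-process case exercised here. Each rank builds a tensor whose first
# dimension is process_index + 2, so the ranks disagree on shape:
#     rank 0: shape (2, 10)        rank 1: shape (3, 10)
# After accelerator.pad_across_processes(tensor) every rank holds shape (3, 10);
# rank 0's copy is zero-padded at the end of dim 0 (or at the front when
# pad_first=True), which is exactly what the error checks above verify.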
| 374 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
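# --- Added sketch (illustrative only, not transformers' actual _LazyModule): the
# lazy-import pattern used above exposes every name immediately but defers the
# heavy submodule imports until an attribute is first accessed.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr):
        if attr not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # the import happens here, on first access, not at package import time
        submodule = importlib.import_module("." + self._symbol_to_module[attr], self.__name__)
        return getattr(submodule, attr)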
| 230 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__UpperCamelCase = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 702 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class lowerCamelCase__ ( UpperCAmelCase ):
"""simple docstring"""
_UpperCamelCase : Any = 'ibert'
def __init__( self , snake_case=30522 , snake_case=768 , snake_case=12 , snake_case=12 , snake_case=3072 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=2 , snake_case=0.02 , snake_case=1E-1_2 , snake_case=1 , snake_case=0 , snake_case=2 , snake_case="absolute" , snake_case=False , snake_case="none" , **snake_case , ):
'''simple docstring'''
super().__init__(pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , **snake_case )
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = hidden_act
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = position_embedding_type
UpperCamelCase__ = quant_mode
UpperCamelCase__ = force_dequant
class lowerCamelCase__ ( UpperCAmelCase ):
"""simple docstring"""
@property
def snake_case__ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
UpperCamelCase__ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCamelCase__ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
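# --- Added usage sketch (the obfuscated class above corresponds to transformers'
# IBertConfig; quant_mode and force_dequant are its two I-BERT-specific options):
from transformers import IBertConfig

config = IBertConfig(quant_mode=True, force_dequant="none")
assert config.quant_mode is True
assert config.num_hidden_layers == 12  # standard BERT-base defaults otherwise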
| 185 | 0 |
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
return self.get_dummy_input()
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False , ):
'''simple docstring'''
snake_case: List[Any] = 4
snake_case: Any = 32
snake_case: Dict = (32, 32)
snake_case: str = torch.manual_seed(0 )
snake_case: List[Any] = torch.device(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = (batch_size, num_channels) + sizes
snake_case: Optional[Any] = randn_tensor(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = {'hidden_states': hidden_states}
if include_temb:
snake_case: List[str] = 1_28
snake_case: str = randn_tensor((batch_size, temb_channels) , generator=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ )
if include_res_hidden_states_tuple:
snake_case: int = torch.manual_seed(1 )
snake_case: int = (randn_tensor(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ ),)
if include_encoder_hidden_states:
snake_case: List[Any] = floats_tensor((batch_size, 32, 32) ).to(SCREAMING_SNAKE_CASE__ )
if include_skip_sample:
snake_case: Dict = randn_tensor(((batch_size, 3) + sizes) , generator=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ )
return dummy_input
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 1_28,
}
if self.block_type == "up":
snake_case: int = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
snake_case: Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case , snake_case: Optional[Any] = self.prepare_init_args_and_inputs_for_common()
snake_case: str = self.block_class(**SCREAMING_SNAKE_CASE__ )
unet_block.to(SCREAMING_SNAKE_CASE__ )
unet_block.eval()
with torch.no_grad():
snake_case: int = unet_block(**SCREAMING_SNAKE_CASE__ )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case: Tuple = output[0]
self.assertEqual(output.shape , self.output_shape )
snake_case: List[Any] = output[0, -1, -3:, -3:]
snake_case: Any = torch.tensor(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
assert torch_all_close(output_slice.flatten() , SCREAMING_SNAKE_CASE__ , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case , snake_case: Dict = self.prepare_init_args_and_inputs_for_common()
snake_case: Optional[Any] = self.block_class(**SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.train()
snake_case: List[Any] = model(**SCREAMING_SNAKE_CASE__ )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case: str = output[0]
snake_case: Optional[Any] = torch.device(SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = randn_tensor(output.shape , device=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = torch.nn.functional.mse_loss(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
loss.backward() | 329 |
'''simple docstring'''
import operator as op
def solve(post_fix: list) -> int:
    '''Evaluate a space-separated postfix expression with a stack.'''
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        '^': op.pow,
        '*': op.mul,
        '/': div,
        '+': op.add,
        '-': op.sub,
    }  # operators & their respective operation
    # print table header
    print('Symbol'.center(8) , 'Action'.center(12) , 'Stack' , sep=' | ')
    print('-' * (30 + len(post_fix)))
    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8) , ('push(' + x + ')').ljust(12) , ','.join(stack) , sep=' | ')
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print(''.rjust(8) , ('pop(' + b + ')').ljust(12) , ','.join(stack) , sep=' | ')
            a = stack.pop()  # pop stack
            # output in tabular format
            print(''.rjust(8) , ('pop(' + a + ')').ljust(12) , ','.join(stack) , sep=' | ')
            stack.append(
                str(opr[x](int(a) , int(b))))  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8) , ('push(' + a + x + b + ')').ljust(12) , ','.join(stack) , sep=' | ' , )
    return int(stack[0])
if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
print("\n\tResult = ", solve(Postfix)) | 329 | 1 |
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 703 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_=None , a_=None , a_=None , a_="resnet50" , a_=3 , a_=32 , a_=3 , a_=True , a_=True , ):
'''simple docstring'''
__snake_case : List[Any] = parent
__snake_case : Tuple = out_indices if out_indices is not None else [4]
__snake_case : Optional[Any] = stage_names
__snake_case : str = out_features
__snake_case : List[str] = backbone
__snake_case : Optional[int] = batch_size
__snake_case : Optional[int] = image_size
__snake_case : str = num_channels
__snake_case : Optional[int] = use_pretrained_backbone
__snake_case : Optional[int] = is_training
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : Optional[int] = self.get_config()
return config, pixel_values
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ ):
'''simple docstring'''
__snake_case : List[str] = TimmBackbone(config=a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
__snake_case : int = model(a_ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = self.prepare_config_and_inputs()
__snake_case , __snake_case : List[Any] = config_and_inputs
__snake_case : Tuple = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class _UpperCAmelCase ( __snake_case, __snake_case, __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(TimmBackbone,) if is_torch_available() else ()
lowerCamelCase__ ={'feature-extraction': TimmBackbone} if is_torch_available() else {}
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = TimmBackboneModelTester(self )
__snake_case : Dict = ConfigTester(self , config_class=a_ , has_text_modality=a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = '''resnet18'''
__snake_case : Tuple = '''microsoft/resnet-18'''
__snake_case : Dict = AutoBackbone.from_pretrained(a_ , use_timm_backbone=a_ )
__snake_case : Tuple = AutoBackbone.from_pretrained(a_ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
__snake_case : Optional[Any] = AutoBackbone.from_pretrained(a_ , use_timm_backbone=a_ , out_indices=[1, 2, 3] )
__snake_case : Optional[int] = AutoBackbone.from_pretrained(a_ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Any = model_class(a_ )
__snake_case : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Tuple = [*signature.parameters.keys()]
__snake_case : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : List[Any] = True
__snake_case : List[Any] = self.has_attentions
# no need to test all models as different heads yield the same functionality
__snake_case : Dict = self.all_model_classes[0]
__snake_case : Optional[int] = model_class(a_ )
model.to(a_ )
__snake_case : int = self._prepare_for_class(a_ , a_ )
__snake_case : Optional[Any] = model(**a_ )
__snake_case : int = outputs[0][-1]
# Encoder-/Decoder-only models
__snake_case : int = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
__snake_case : int = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=a_ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Optional[int] = model_class(a_ )
model.to(a_ )
model.eval()
__snake_case : List[str] = model(**a_ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
__snake_case : Optional[Any] = copy.deepcopy(a_ )
__snake_case : str = None
__snake_case : int = model_class(a_ )
model.to(a_ )
model.eval()
__snake_case : Any = model(**a_ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
__snake_case : Union[str, Any] = copy.deepcopy(a_ )
__snake_case : int = False
__snake_case : List[str] = model_class(a_ )
model.to(a_ )
model.eval()
__snake_case : Optional[Any] = model(**a_ )
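# --- Added usage sketch (mirroring the timm/transformers equivalence test above;
# the checkpoints and keyword arguments are the ones the test itself uses):
from transformers import AutoBackbone

timm_backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
hf_backbone = AutoBackbone.from_pretrained("microsoft/resnet-18", out_indices=[1, 2, 3])
# both expose matching feature-map counts and channel dimensions
assert len(timm_backbone.out_features) == len(hf_backbone.out_features)
assert list(timm_backbone.channels) == list(hf_backbone.channels)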
| 229 | 0 |
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
lowerCamelCase_ = '''__DUMMY_TRANSFORMERS_USER__'''
lowerCamelCase_ = '''Dummy User'''
lowerCamelCase_ = '''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'''
lowerCamelCase_ = '''https://hub-ci.huggingface.co'''
lowerCamelCase_ = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}'''
lowerCamelCase_ = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}'''
lowerCamelCase_ = Path('''~/.huggingface/hub_ci_token''').expanduser()
@pytest.fixture
def snake_case ( A__ ):
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE" ,A__ )
@pytest.fixture
def snake_case ( A__ ):
monkeypatch.setattr("datasets.config.HF_ENDPOINT" ,A__ )
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL" ,A__ )
@pytest.fixture
def snake_case ( A__ ):
monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token" ,A__ )
@pytest.fixture
def snake_case ( A__ ,A__ ):
HfFolder.save_token(A__ )
yield
HfFolder.delete_token()
@pytest.fixture(scope="session" )
def snake_case ( ):
return HfApi(endpoint=A__ )
@pytest.fixture(scope="session" )
def snake_case ( A__ ):
UpperCAmelCase_ : Optional[Any] = HfFolder.get_token()
HfFolder.save_token(A__ )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(A__ )
@pytest.fixture
def snake_case ( A__ ):
def _cleanup_repo(A__ ):
hf_api.delete_repo(A__ ,token=A__ ,repo_type="dataset" )
return _cleanup_repo
@pytest.fixture
def snake_case ( A__ ):
@contextmanager
def _temporary_repo(A__ ):
try:
yield repo_id
finally:
cleanup_repo(A__ )
return _temporary_repo
@pytest.fixture(scope="session" )
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : Tuple = F"""repo_txt_data-{int(time.time() * 10e3 )}"""
UpperCAmelCase_ : List[Any] = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(A__ ,token=A__ ,repo_type="dataset" ,private=A__ )
hf_api.upload_file(
token=A__ ,path_or_fileobj=str(A__ ) ,path_in_repo="data/text_data.txt" ,repo_id=A__ ,repo_type="dataset" ,)
yield repo_id
try:
hf_api.delete_repo(A__ ,token=A__ ,repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def snake_case ( A__ ,A__ ,A__ ):
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session" )
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : List[Any] = F"""repo_zipped_txt_data-{int(time.time() * 10e3 )}"""
UpperCAmelCase_ : int = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(A__ ,token=A__ ,repo_type="dataset" ,private=A__ )
hf_api.upload_file(
token=A__ ,path_or_fileobj=str(A__ ) ,path_in_repo="data.zip" ,repo_id=A__ ,repo_type="dataset" ,)
yield repo_id
try:
hf_api.delete_repo(A__ ,token=A__ ,repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def snake_case ( A__ ,A__ ,A__ ):
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session" )
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : Any = F"""repo_zipped_img_data-{int(time.time() * 10e3 )}"""
UpperCAmelCase_ : Tuple = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(A__ ,token=A__ ,repo_type="dataset" ,private=A__ )
hf_api.upload_file(
token=A__ ,path_or_fileobj=str(A__ ) ,path_in_repo="data.zip" ,repo_id=A__ ,repo_type="dataset" ,)
yield repo_id
try:
hf_api.delete_repo(A__ ,token=A__ ,repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def snake_case ( A__ ,A__ ,A__ ):
return hf_private_dataset_repo_zipped_img_data_
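# --- Added sketch (illustrative) of the yield-fixture pattern used throughout this
# conftest: code before `yield` is setup, code after it is teardown, and pytest
# runs the teardown even when the test fails. The repo name and cleanup step are
# placeholders for the hub operations above.
import pytest


@pytest.fixture
def temporary_resource():
    resource = "user/tmp-repo"  # setup: create the resource
    yield resource              # hand it to the test
    # teardown: release the resource; the fixtures above wrap this step in
    # try/except because the repo may already be gone or the token invalidated
    print(f"cleaning up {resource}")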
| 95 |
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial at x by summing coefficient * x**i terms."""
    return sum(c * (x**i) for i, c in enumerate(poly))
def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial at x with Horner's rule, avoiding explicit powers."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
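# --- Added worked example (illustrative): Horner's rule rewrites
#     p(x) = a0 + a1*x + a2*x**2 + a3*x**3 + a4*x**4
# as the nested form
#     p(x) = a0 + x*(a1 + x*(a2 + x*(a3 + x*a4)))
# using one multiplication and one addition per coefficient, with no explicit
# powers. For poly = (0.0, 0.0, 5.0, 9.3, 7.0) and x = 10.0 both functions agree:
#     ((((7.0*10 + 9.3)*10 + 5.0)*10 + 0.0)*10 + 0.0) = 79800.0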
| 387 | 0 |
'''simple docstring'''
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    """Recursively insertion-sort the first n elements of collection in place."""
    if len(collection) <= 1 or n <= 1:
        return
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)
def insert_next(collection: list, index: int):
    """Restore order between collection[index - 1] and collection[index], continuing rightward."""
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)
if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
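# --- Added worked example (illustrative): rec_insertion_sort on [3, 1, 2].
#     rec(_, 3): insert_next at index 2 -> 1 <= 2, nothing to do       [3, 1, 2]
#     rec(_, 2): insert_next at index 1 -> 3 > 1, swap                 [1, 3, 2]
#                insert_next recurses to index 2 -> 3 > 2, swap        [1, 2, 3]
#                insert_next recurses to index 3 -> out of range, stop
# The recursion on n replaces the usual outer loop of insertion sort; insert_next
# plays the role of the inner shift-and-place step.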
| 564 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
    from transformers import (
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelForTableQuestionAnswering,
        TFAutoModelForTokenClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFFunnelBaseModel,
        TFFunnelModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
        TFTapasForQuestionAnswering,
    )
    from transformers.models.auto.modeling_tf_auto import (
        TF_MODEL_FOR_CAUSAL_LM_MAPPING,
        TF_MODEL_FOR_MASKED_LM_MAPPING,
        TF_MODEL_FOR_PRETRAINING_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        TF_MODEL_MAPPING,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig


@require_tf
class TFAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

    @slow
    @require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained(self):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)

            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_pretrained_with_tuple_values(self):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)

        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)

    def test_new_model_registration(self):
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)

        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
        ):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(OSError, "Use `from_pt=True` to load this model"):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")

    def test_cached_model_has_minimum_calls_to_head(self):
        # Make sure we have cached the model.
        _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)

        # With a sharded checkpoint
        _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)
"""Audio Spectrogram Transformer (AST) model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
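
# Illustrative sketch (added, not part of the original file): assuming the
# patch embedding follows the usual `(size - patch) // stride + 1` convolution
# arithmetic, the defaults above determine the spectrogram patch grid.
if __name__ == "__main__":
    config = ASTConfig()
    freq_out = (config.num_mel_bins - config.patch_size) // config.frequency_stride + 1
    time_out = (config.max_length - config.patch_size) // config.time_stride + 1
    print(f"patch grid: {freq_out} x {time_out} = {freq_out * time_out} patches")  # 12 x 101 = 1212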
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
"""simple docstring"""
from __future__ import annotations
def _snake_case ( __snake_case : list[int] , __snake_case : int , __snake_case : int , __snake_case : int ):
"""simple docstring"""
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
_lowerCamelCase , _lowerCamelCase : List[Any] = array[indexa], array[indexa]
def _snake_case ( __snake_case : list[int] , __snake_case : int , __snake_case : int , __snake_case : int ):
"""simple docstring"""
if length > 1:
_lowerCamelCase : List[str] = int(length / 2 )
for i in range(__snake_case , low + middle ):
comp_and_swap(__snake_case , __snake_case , i + middle , __snake_case )
bitonic_merge(__snake_case , __snake_case , __snake_case , __snake_case )
bitonic_merge(__snake_case , low + middle , __snake_case , __snake_case )
def _snake_case ( __snake_case : list[int] , __snake_case : int , __snake_case : int , __snake_case : int ):
"""simple docstring"""
if length > 1:
_lowerCamelCase : List[str] = int(length / 2 )
bitonic_sort(__snake_case , __snake_case , __snake_case , 1 )
bitonic_sort(__snake_case , low + middle , __snake_case , 0 )
bitonic_merge(__snake_case , __snake_case , __snake_case , __snake_case )
if __name__ == "__main__":
UpperCAmelCase = input("""Enter numbers separated by a comma:\n""").strip()
UpperCAmelCase = [int(item.strip()) for item in user_input.split(""",""")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("""\nSorted array in ascending order is: """, end="""""")
print(*unsorted, sep=""", """)
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("""Sorted array in descending order is: """, end="""""")
print(*unsorted, sep=""", """)
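
    # Added note (illustrative): the network above assumes a power-of-two input
    # length; other sizes can be handled by padding with sentinels first.
    def bitonic_sort_any_length(data: list[int]) -> list[int]:
        n = 1
        while n < len(data):
            n *= 2
        padded = data + [float("inf")] * (n - len(data))  # pad to next power of two
        bitonic_sort(padded, 0, n, 1)
        return padded[: len(data)]

    assert bitonic_sort_any_length([3, 1, 2]) == [1, 2, 3]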
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
'''In-place exchange sort: repeatedly swap any out-of-order pair.'''
def exchange_sort(numbers: list) -> list:
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
def gnome_sort(lst: list) -> list:
    """Sort a list in place with the gnome sort algorithm and return it."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
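    # Illustrative check (added): gnome sort agrees with the built-in sorted().
    assert gnome_sort([3, 1, 2]) == sorted([3, 1, 2])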
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """BigBird QA module with an extra 5-way classification head on the pooled output."""

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels):
    def cross_entropy(logits, labels, reduction=None):
        num_classes = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(num_classes)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooled_labels)
    return (start_loss + end_loss + pooled_loss) / 3
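
# Illustrative check (added, using the de-obfuscated names above): with uniform
# logits over K classes every cross-entropy term equals log(K), so
#
#     logits = jnp.zeros((1, 4)); labels = jnp.array([1])
#     calculate_loss_for_nq(logits, labels, logits, labels, logits, labels)
#
# evaluates to log(4) ~ 1.386.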
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng


@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)

    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator


def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)

    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
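
# Illustrative probe (added): the joined schedule warms up linearly from
# init_lr to lr over warmup_steps, then decays linearly towards 1e-7.
if __name__ == "__main__":
    lr_fn = scheduler_fn(3e-5, 0.0, 100, 200)
    print(lr_fn(0), lr_fn(50), lr_fn(100), lr_fn(200))  # 0.0, ~1.5e-5, 3e-5, ~1e-7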
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
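
# Added note (illustrative, not part of the file): _LazyModule defers the heavy
# submodule imports until first attribute access. A toy standalone version of
# the same idea, using a module-level __getattr__ (PEP 562):
#
#     import importlib
#     _import_structure = {"math": ["sqrt"]}
#
#     def __getattr__(name):
#         for submodule, names in _import_structure.items():
#             if name in names:
#                 return getattr(importlib.import_module(submodule), name)
#         raise AttributeError(name)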
from typing import Dict, List, Optional

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    """Construct a CANINE tokenizer: split text into characters, then map each
    character to its Unicode code point."""

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)
@property
def __lowerCamelCase ( self ):
return self._unicode_vocab_size
def __lowerCamelCase ( self , __UpperCAmelCase ):
return list(__UpperCAmelCase )
def __lowerCamelCase ( self , __UpperCAmelCase ):
try:
return ord(__UpperCAmelCase )
except TypeError:
raise ValueError(F"""invalid token: '{token}'""" )
def __lowerCamelCase ( self , __UpperCAmelCase ):
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(__UpperCAmelCase )
except TypeError:
raise ValueError(F"""invalid id: {index}""" )
def __lowerCamelCase ( self , __UpperCAmelCase ):
return "".join(__UpperCAmelCase )
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
SCREAMING_SNAKE_CASE_ : List[str] =[self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Any =[self.cls_token_id]
SCREAMING_SNAKE_CASE_ : str =cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] =[1] + ([0] * len(__UpperCAmelCase )) + [1]
if token_ids_a is not None:
result += ([0] * len(__UpperCAmelCase )) + [1]
return result
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
SCREAMING_SNAKE_CASE_ : str =[self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Optional[Any] =[self.cls_token_id]
SCREAMING_SNAKE_CASE_ : Optional[Any] =len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
return ()
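
A minimal usage sketch of the character-level tokenizer above (this is the CANINE tokenizer; the checkpoint name is illustrative). Every Unicode codepoint is its own id, and [CLS]/[SEP] live at the private-use codepoints 0xE000/0xE001:

from transformers import CanineTokenizer

tokenizer = CanineTokenizer.from_pretrained("google/canine-s")
enc = tokenizer("hi")
print(enc.input_ids)  # [57344, 104, 105, 57345] == [CLS], ord("h"), ord("i"), [SEP]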
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
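
A hedged sketch of running the pipeline under test outside the test harness (checkpoint name taken from the slow test above; the default output type is a list of PIL images):

import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
image = pipe(num_inference_steps=20, generator=torch.manual_seed(0)).images[0]
image.save("karras_ve_sample.png")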
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)


DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several datasets (sources) into a single dataset, alternating between the sources to pick examples."""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Concatenate several datasets into a single dataset, along the rows (axis=0) or along the columns (axis=1)."""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
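
A hedged usage sketch of the two public entry points above, on toy in-memory datasets:

from datasets import Dataset, concatenate_datasets, interleave_datasets

d1 = Dataset.from_dict({"text": ["a", "b"]})
d2 = Dataset.from_dict({"text": ["c", "d"]})

# Alternates between the sources; by default stops once the first source is exhausted.
mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)
# Stacks rows end to end (axis=0); axis=1 would join columns side by side.
combined = concatenate_datasets([d1, d2])
print(mixed["text"], combined["text"])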
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    """General feature extraction class for sequence (e.g. speech) inputs, adding padding/truncation utilities."""

    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
def lowercase_ ( self : Tuple , __snake_case : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , __snake_case : Union[bool, str, PaddingStrategy] = True , __snake_case : Optional[int] = None , __snake_case : bool = False , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[Union[str, TensorType]] = None , ):
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(__snake_case , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
a : Union[str, Any] = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
F""" to this method that includes {self.model_input_names[0]}, but you provided"""
F""" {list(processed_features.keys() )}""" )
a : Tuple = processed_features[self.model_input_names[0]]
a : str = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(__snake_case ) == 0:
if return_attention_mask:
a : Dict = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
a : Optional[int] = required_input[0]
if isinstance(__snake_case , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
a : Dict = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(__snake_case ):
a : Tuple = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(__snake_case ):
a : Any = 'tf'
elif is_torch_tensor(__snake_case ):
a : Optional[Any] = 'pt'
elif isinstance(__snake_case , (int, float, list, tuple, np.ndarray) ):
a : List[Any] = 'np'
else:
raise ValueError(
F"""type of {first_element} unknown: {type(__snake_case )}. """
'Should be one of a python, numpy, pytorch or tensorflow object.' )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
a : List[Any] = to_numpy(__snake_case )
else:
a : Tuple = [to_numpy(__snake_case ) for v in value]
# Convert padding_strategy in PaddingStrategy
a : Any = self._get_padding_strategies(padding=__snake_case , max_length=__snake_case )
a : int = processed_features[self.model_input_names[0]]
a : Dict = len(__snake_case )
if not all(len(__snake_case ) == batch_size for v in processed_features.values() ):
raise ValueError('Some items in the output dictionary have a different batch size than others.' )
a : Optional[int] = []
for i in range(__snake_case ):
a : int = {k: v[i] for k, v in processed_features.items()}
# truncation
a : List[str] = self._truncate(
__snake_case , max_length=__snake_case , pad_to_multiple_of=__snake_case , truncation=__snake_case , )
truncated_inputs.append(__snake_case )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
a : List[str] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
a : Tuple = PaddingStrategy.MAX_LENGTH
a : int = {}
for i in range(__snake_case ):
# padding
a : int = self._pad(
truncated_inputs[i] , max_length=__snake_case , padding_strategy=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , )
for key, value in outputs.items():
if key not in batch_outputs:
a : int = []
if value.dtype is np.dtype(np.floataa ):
a : List[str] = value.astype(np.floataa )
batch_outputs[key].append(__snake_case )
return BatchFeature(__snake_case , tensor_type=__snake_case )
def lowercase_ ( self : List[str] , __snake_case : Union[Dict[str, np.ndarray], BatchFeature] , __snake_case : Optional[int] = None , __snake_case : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , ):
a : List[str] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
a : Any = len(__snake_case )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
a : Dict = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
a : List[Any] = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(__snake_case ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
a : Union[str, Any] = np.ones(len(__snake_case ) , dtype=np.intaa )
if needs_to_be_padded:
a : Tuple = max_length - len(__snake_case )
if self.padding_side == "right":
if return_attention_mask:
a : Any = np.pad(
processed_features['attention_mask'] , (0, difference) )
a : Optional[Any] = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
a : Optional[int] = np.pad(
__snake_case , __snake_case , 'constant' , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
a : Optional[int] = np.pad(
processed_features['attention_mask'] , (difference, 0) )
a : Optional[Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
a : List[Any] = np.pad(
__snake_case , __snake_case , 'constant' , constant_values=self.padding_value )
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return processed_features
def lowercase_ ( self : List[str] , __snake_case : Union[Dict[str, np.ndarray], BatchFeature] , __snake_case : Optional[int] = None , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , ):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' )
a : Dict = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
a : Optional[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
a : str = len(__snake_case ) > max_length
if needs_to_be_truncated:
a : str = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
a : List[str] = processed_features['attention_mask'][:max_length]
return processed_features
def lowercase_ ( self : Dict , __snake_case : Optional[Any]=False , __snake_case : Optional[int]=None ):
# Get padding strategy
if padding is not False:
if padding is True:
a : Union[str, Any] = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(__snake_case , __snake_case ):
a : List[Any] = PaddingStrategy(__snake_case )
elif isinstance(__snake_case , __snake_case ):
a : List[str] = padding
else:
a : Dict = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' )
return padding_strategy | 714 |
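
A hedged usage sketch of pad() above through one of its concrete subclasses (Wav2Vec2FeatureExtractor); passing a list of dicts also shows why pad() works as a DataLoader collate_fn:

from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
features = [{"input_values": [0.1, 0.2, 0.3]}, {"input_values": [0.4]}]
batch = extractor.pad(features, padding=True, return_attention_mask=True, return_tensors="np")
print(batch["input_values"].shape)  # (2, 3): the short example is padded with 0.0
print(batch["attention_mask"])      # [[1 1 1] [1 0 0]]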
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a word is a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class PhobertTokenizer(PreTrainedTokenizer):
    """Construct a PhoBERT tokenizer, based on Byte-Pair Encoding over word-segmented Vietnamese text."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        # The first four ids are reserved for the special tokens.
        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3

        self.add_from_file(vocab_file)

        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        """Load a pre-existing fairseq-style dictionary file and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
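
A hedged usage sketch of the tokenizer above. PhoBERT expects word-segmented Vietnamese input (multi-syllable words joined with underscores), and the exact tokens depend on the released vocab/bpe files:

from transformers import PhobertTokenizer

tokenizer = PhobertTokenizer.from_pretrained("vinai/phobert-base")
line = "Tôi là sinh_viên"  # pre-segmented input
print(tokenizer.tokenize(line))
print(tokenizer(line)["input_ids"])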
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
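
A hedged toy check of what set_param() above does, applied to a plain linear layer (all names below are illustrative):

import torch
from torch import nn

layer = nn.Linear(4, 4)
new_weight = torch.zeros(4, 4)
new_bias = torch.ones(4)
set_param(layer, new_weight, new_bias)  # installs the tensors as the layer's Parameters
assert torch.equal(layer.weight, new_weight)
assert torch.equal(layer.bias, new_bias)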
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
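
The `_LazyModule` registration above keeps `import transformers` cheap: the torch-backed Jukebox submodules are only imported when one of their attributes is first accessed. A minimal sketch of the effect:

import transformers

config = transformers.JukeboxConfig()  # this attribute access triggers the real submodule import
print(config.model_type)  # "jukebox"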
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
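
A hedged sketch of using the pipeline under test for actual generation, mirroring the slow tests above (export_to_video is a diffusers utility that writes the frames to an .mp4 and returns its path):

import torch

from diffusers import TextToVideoSDPipeline
from diffusers.utils import export_to_video

pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
video_frames = pipe("Spiderman is surfing", num_inference_steps=25).frames
video_path = export_to_video(video_frames)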
"""Fill seq2seq examples with bitext up to max_tokens without breaking up examples."""
import argparse
import shutil
from pathlib import Path

from tqdm import tqdm

from transformers import AutoTokenizer


def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
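
A hedged usage sketch of pack_examples() above (tokenizer name illustrative): adjacent source/target pairs are greedily merged until adding one more would push either side past max_tokens:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("t5-small")
src = ["first sentence.", "second sentence.", "third sentence."]
tgt = ["s1", "s2", "s3"]
packed_src, packed_tgt = pack_examples(tok, src, tgt, max_tokens=10)
print(len(packed_src), packed_src)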
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """Base class from which `.generate()` streamers should inherit."""

    def put(self, value):
        """Function that is called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Function that is called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    """Streamer that prints generated text to stdout as soon as entire words are formed."""

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Receives tokens, decodes them, and prints them to stdout as soon as they form entire words."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flushes any remaining cache and signals the end of the stream."""
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Checks whether `cp` is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode blocks:
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True

        return False


class TextIteratorStreamer(TextStreamer):
    """Streamer that stores finalized text chunks in a queue, to be consumed by a downstream iterator."""

    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
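
A hedged usage sketch of the iterator streamer above: generation runs in a background thread and the main thread consumes decoded text chunks as they are produced (model name illustrative):

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
streamer = TextIteratorStreamer(tok)

thread = Thread(target=model.generate, kwargs={**inputs, "streamer": streamer, "max_new_tokens": 20})
thread.start()
for new_text in streamer:
    print(new_text, end="")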
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" ConvBERT tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
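
A hedged usage sketch of the fast tokenizer above, using one of the checkpoints from the pretrained map:

from transformers import ConvBertTokenizerFast

tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
enc = tokenizer("hello world", "second segment")
print(enc.input_ids)       # [CLS] ... [SEP] ... [SEP]
print(enc.token_type_ids)  # 0s for the first segment, 1s for the second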
def is_even(number: int) -> bool:
    """Return True if the input integer is even, by checking its lowest bit.

    >>> is_even(2)
    True
    >>> is_even(3)
    False
    """
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
from math import pow, sqrt

# Graham's law of effusion: rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1).
# The obfuscated source collapsed the 1/2 subscripts, so the parameter names
# below restore them under that convention.


def validate(*values: float) -> bool:
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
    args = parser.parse_args()
main(args)
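
A hedged numeric check of the L0 "stretch and clamp" mask used above: scores pass through a sigmoid, are rescaled from [0, 1] to [-0.1, 1.1], then clamped back to [0, 1], so strongly negative scores map to exactly 0 and strongly positive ones to exactly 1:

import torch

scores = torch.tensor([-4.0, 0.0, 4.0])
l, r = -0.1, 1.1
s = torch.sigmoid(scores)
s_bar = s * (r - l) + l
mask = s_bar.clamp(min=0.0, max=1.0)
print(mask)  # tensor([0.0000, 0.5000, 1.0000]) up to rounding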
from timeit import timeit
test_data = {
'''MALAYALAM''': True,
'''String''': False,
'''rotor''': True,
'''level''': True,
'''A''': True,
'''BB''': True,
'''ABC''': False,
'''amanaplanacanalpanama''': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F'''{key:21} {value}''')
print('''a man a plan a canal panama''')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('''is_palindrome_slice''')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('''is_palindrome''')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('''is_palindrome_recursive''')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('''is_palindrome_traversal''')
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((f"backbone.0.body.layers.{i}.downsample.reduction.weight", f"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.weight", f"model.backbone.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.bias", f"model.backbone.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", f"model.encoder.layers.{i}.self_attn.sampling_offsets.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", f"model.encoder.layers.{i}.self_attn.sampling_offsets.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", f"model.encoder.layers.{i}.self_attn.attention_weights.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", f"model.encoder.layers.{i}.self_attn.attention_weights.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.weight", f"model.encoder.layers.{i}.self_attn.value_proj.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.bias", f"model.encoder.layers.{i}.self_attn.value_proj.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.weight", f"model.encoder.layers.{i}.self_attn.output_proj.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.bias", f"model.encoder.layers.{i}.self_attn.output_proj.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.weight", f"model.encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"model.encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"model.encoder.layers.{i}.fc1.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"model.encoder.layers.{i}.fc1.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"model.encoder.layers.{i}.fc2.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"model.encoder.layers.{i}.fc2.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"model.encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"model.encoder.layers.{i}.final_layer_norm.bias") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", f"model.decoder.layers.{i}.encoder_attn.attention_weights.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", f"model.decoder.layers.{i}.encoder_attn.attention_weights.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", f"model.decoder.layers.{i}.encoder_attn.value_proj.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", f"model.decoder.layers.{i}.encoder_attn.value_proj.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", f"model.decoder.layers.{i}.encoder_attn.output_proj.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", f"model.decoder.layers.{i}.encoder_attn.output_proj.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.weight", f"model.decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"model.decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"model.decoder.layers.{i}.self_attn.out_proj.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"model.decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm2.weight", f"model.decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm2.bias", f"model.decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"model.decoder.layers.{i}.fc1.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"model.decoder.layers.{i}.fc1.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"model.decoder.layers.{i}.fc2.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"model.decoder.layers.{i}.fc2.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"model.decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"model.decoder.layers.{i}.final_layer_norm.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
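# A small, self-contained sanity check (illustrative, not part of the
# conversion): a fused qkv projection of width 3*dim splits into its
# query/key/value thirds exactly as done above.
#
#     dim = 4
#     qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
#     q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
#     assert torch.equal(torch.cat([q, k, v]), qkv)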
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
# We will verify our conversion on this image
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    # load config
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # print the original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_A = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
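    # Example invocation (the script filename below is an assumption):
    #
    #     python convert_deta_swin_to_pytorch.py \
    #         --model_name deta-swin-large \
    #         --pytorch_dump_folder_path ./deta-swin-large \
    #         --push_to_hub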
| 721 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)
class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False
    def __init__(
        self,
        dataset_name,
        config,
        version,
        cache_dir=None,
        use_local_dummy_data=False,
        load_existing_dummy_data=True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if it's a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict
    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list
    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass
    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")
    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
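# Hypothetical usage sketch (not part of this module): a dataset test would
# typically point the manager at a local dummy_data.zip and swap it in for the
# real download manager, e.g.:
#
#     dl_manager = MockDownloadManager(
#         "squad", config=None, version=Version("1.0.0"), use_local_dummy_data=True
#     )
#     paths = dl_manager.download_and_extract({"train": "https://example.com/train-v1.1.json"})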
| 325 | 0 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    """Arguments pertaining to which config/tokenizer/model we are going to fine-tune from."""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    """Arguments pertaining to the data used for training and evaluation."""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "The name of the task to train on."}
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    """Arguments pertaining to the self-training loop."""

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={"help": "Confidence threshold for pseudo-labeled data filtering."}
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100, metadata={"help": "Number of self-training iterations to run."}
    )
    seed: Optional[int] = dataclasses.field(
        default=None, metadata={"help": "Random seed for initialization."}
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo-labeled data for the next self-training iteration."""

    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
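# Illustrative only: the confidence filter above keeps rows whose predicted
# probability exceeds the threshold. A toy equivalent on an in-memory dataset:
#
#     toy = datasets.Dataset.from_dict({"prediction": [0, 1], "probability": [0.4, 0.9]})
#     kept = toy.filter(lambda ex: ex["probability"] > 0.5)
#     assert kept["prediction"] == [1]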
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Self-training loop: alternate between fine-tuning and pseudo-labeling."""

    # Initialize the accelerator. We will let the accelerator handle device placement for us.
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(eval_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
| 39 |
def merge_sort(collection: list) -> list:
    """Sort a mutable collection by repeatedly moving its min and max to the ends."""
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
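    # A quick illustrative check of the behaviour:
    #
    #     >>> merge_sort([5, 1, 4, 2, 3])
    #     [1, 2, 3, 4, 5]
    #     >>> merge_sort([])
    #     []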
| 441 | 0 |
from __future__ import annotations


def encode(plain: str) -> list[int]:
    """Encode a lowercase string into a list of 1-26 alphabet positions."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Decode a list of 1-26 alphabet positions back into a string."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
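    # Illustrative round trip:
    #
    #     >>> encode("hello")
    #     [8, 5, 12, 12, 15]
    #     >>> decode([8, 5, 12, 12, 15])
    #     'hello'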
| 716 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
        hood we track all the leaf operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
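# A minimal, hypothetical sanity check for ModuleTransfer (not part of the
# original script): after a transfer driven by one forward pass, the weights
# of two identically shaped stacks should match.
#
#     src = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
#     dest = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
#     ModuleTransfer(src=src, dest=dest)(torch.randn(1, 3, 32, 32))
#     assert torch.allclose(src[0].weight, dest[0].weight)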
class FakeRegNetVisslWrapper(nn.Module):
    """Fake wrapper for RegNet that mimics what vissl does without the need to pass a config file."""

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    """A dictionary with some additional logic to return a function that creates the correct original model."""

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)

        return val
class NameToOurModelFuncMap(dict):
    """A dictionary with some additional logic to return the correct Hugging Face RegNet class reference."""

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )
    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

    print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 , layer_type='x' ),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 1_60, 3_84] , groups_width=16 , layer_type='x' ),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 2_40, 5_28] , groups_width=24 , layer_type='x' ),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 1_28, 2_88, 6_72] , groups_width=16 , layer_type='x' ),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 1_68, 4_08, 9_12] , groups_width=24 , layer_type='x' ),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 1_92, 4_32, 10_08] , groups_width=48 , layer_type='x' ),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 2_40, 5_60, 13_60] , groups_width=40 , layer_type='x' ),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 3_92, 7_84, 16_24] , groups_width=56 , layer_type='x' ),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 2_40, 7_20, 19_20] , groups_width=1_20 , layer_type='x' ),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 , layer_type='x' ),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[2_56, 5_12, 8_96, 20_48] , groups_width=1_28 , layer_type='x' ),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[3_36, 6_72, 13_44, 25_20] , groups_width=1_68 , layer_type='x' ),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 ),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 1_04, 2_08, 4_40] , groups_width=8 ),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 1_12, 2_56, 6_08] , groups_width=16 ),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 1_28, 3_20, 7_68] , groups_width=16 ),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 1_20, 3_36, 8_88] , groups_width=24 ),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 2_16, 5_76, 15_12] , groups_width=24 ),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[1_28, 1_92, 5_12, 10_88] , groups_width=64 ),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[1_44, 2_88, 5_76, 12_96] , groups_width=72 ),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 4_48, 8_96, 20_16] , groups_width=56 ),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 ),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[2_24, 4_48, 12_32, 30_24] , groups_width=1_12 ),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()

    # add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
SCREAMING_SNAKE_CASE__ : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
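    # Example invocation (the script filename below is an assumption):
    #
    #     python convert_regnet_seer_to_pytorch.py \
    #         --model_name regnet-y-320-seer \
    #         --pytorch_dump_folder_path ./regnet-dump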
| 636 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 5_1_2,
'''facebook/dpr-ctx_encoder-multiset-base''': 5_1_2,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 5_1_2,
'''facebook/dpr-question_encoder-multiset-base''': 5_1_2,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 5_1_2,
'''facebook/dpr-reader-multiset-base''': 5_1_2,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
        - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
          is provided).
        - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
          acceptable input length for the model if that argument is not provided.
        - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
          lengths).
    truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
        Activates and controls truncation. Accepts the following values:
        - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
          the maximum acceptable input length for the model if that argument is not provided. This will truncate
          token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
          of pairs) is provided.
        - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
          acceptable input length for the model if that argument is not provided. This will only truncate the first
          sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
        - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
          acceptable input length for the model if that argument is not provided. This will only truncate the
          second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
        - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
          greater than the model maximum admissible input size).
    max_length (`int`, *optional*):
        Controls the maximum length to use by one of the truncation/padding parameters.
        If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
        is required by one of the truncation/padding parameters. If the model has no specific maximum input
        length (like XLNet) truncation/padding to a maximum length will be deactivated.
    return_tensors (`str` or [`~utils.TensorType`], *optional*):
        If set, will return tensors instead of list of python integers. Acceptable values are:
        - `'tf'`: Return TensorFlow `tf.constant` objects.
        - `'pt'`: Return PyTorch `torch.Tensor` objects.
        - `'np'`: Return Numpy `np.ndarray` objects.
    return_attention_mask (`bool`, *optional*):
        Whether or not to return the attention mask. If not set, will return the attention mask according to the
        specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                # mask out padding positions
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            # skip spans that overlap an already-chosen interval
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
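# A minimal usage sketch of the reader tokenizer above (checkpoint name taken
# from the pretrained maps; exact ids/shapes depend on the inputs):
#
#   tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(
#       questions="What is love?",
#       titles=["Haddaway", "What Is Love (song)"],  # one question duplicated across passages
#       texts=["Haddaway is a singer...", "The song was released in 1993..."],
#       padding=True,
#       return_tensors="pt",
#   )
#   # encoded["input_ids"] has shape (n_passages, sequence_length), laid out as
#   # [CLS] question [SEP] title [SEP] text, per the docstring above.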
| 176 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False
    def setUp(self) -> None:
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
@cached_property
    def ta_base_tokenizer(self) -> ByT5Tokenizer:
        return ByT5Tokenizer.from_pretrained("google/byt5-small")
    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self) -> None:
        tokenizer = self.ta_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
self.assertListEqual(batch_with_eos_added["""input_ids"""] , batch_without_eos_added["""input_ids"""] )
    def test_multibytes_char(self) -> None:
        tokenizer = self.ta_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")
        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """e è é ê ë</s>""" )
    def test_prepare_batch_integration(self) -> None:
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)
    def test_empty_target_text(self) -> None:
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self) -> None:
        tokenizer = self.ta_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_eos_in_input(self) -> None:
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])
    def test_save_and_load_tokenizer(self) -> None:
A__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
A__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
A__ = tempfile.mkdtemp()
A__ = """ He is very happy, UNwant\u00E9d,running"""
A__ = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
A__ = tokenizer.__class__.from_pretrained(__lowerCAmelCase )
A__ = after_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
shutil.rmtree(__lowerCAmelCase )
A__ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
A__ = tempfile.mkdtemp()
A__ = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
A__ = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
A__ = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
A__ = tokenizer.__class__.from_pretrained(__lowerCAmelCase )
A__ = after_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
A__ = tokenizer.__class__.from_pretrained(__lowerCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__lowerCAmelCase )
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self) -> None:
A__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
A__ = json.load(__lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
A__ = json.load(__lowerCAmelCase )
A__ = [f'<extra_id_{i}>' for i in range(1_25 )]
A__ = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
A__ = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(__lowerCAmelCase , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
A__ = tokenizer_class.from_pretrained(
__lowerCAmelCase , )
self.assertIn(
"""an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
A__ = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=__lowerCAmelCase )]
A__ = tokenizer_class.from_pretrained(
__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , )
self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
    def test_decode_single_bytes(self) -> None:
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                tokenizer = tokenizer_class.from_pretrained(tmp_dir)
                self.assertTrue(tokenizer.decode([255]) == "")
    # tokenizer can be instantiated without any pretrained files, so no pretrained list to check
    def test_pretrained_model_lists(self) -> None:
        pass

    # tokenizer does not have a conventional vocabulary
    def test_get_vocab(self) -> None:
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self) -> None:
        pass

    # vocab-wide round-trip checks do not apply to a vocabulary-free tokenizer
    def test_conversion_reversible(self) -> None:
        pass
    def test_convert_tokens_to_string_format(self) -> None:
        # The default common tests use invalid tokens for ByT5, which can only accept
        # one-character strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
    def test_tokenizers_common_ids_setters(self) -> None:
A__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
A__ = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
A__ = 0
A__ = tokenizer.convert_ids_to_tokens(
__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
for attr in attributes_list:
setattr(__lowerCAmelCase , attr + """_id""" , __lowerCAmelCase )
self.assertEqual(getattr(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase )
self.assertEqual(getattr(__lowerCAmelCase , attr + """_id""" ) , __lowerCAmelCase )
setattr(__lowerCAmelCase , attr + """_id""" , __lowerCAmelCase )
self.assertEqual(getattr(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase )
self.assertEqual(getattr(__lowerCAmelCase , attr + """_id""" ) , __lowerCAmelCase )
setattr(__lowerCAmelCase , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(__lowerCAmelCase , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(__lowerCAmelCase , """additional_special_tokens_ids""" ) , [] )
setattr(__lowerCAmelCase , """additional_special_tokens_ids""" , [token_id_to_test_setters] )
self.assertListEqual(getattr(__lowerCAmelCase , """additional_special_tokens""" ) , [token_to_test_setters] )
self.assertListEqual(getattr(__lowerCAmelCase , """additional_special_tokens_ids""" ) , [token_id_to_test_setters] )
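# For reference, the expected ids in the integration tests above follow ByT5's
# byte-level scheme: every UTF-8 byte b maps to id b + 3, with ids 0/1/2 reserved
# for pad/</s>/unk. A quick sanity check (a sketch using only the public API):
#
#   tok = ByT5Tokenizer.from_pretrained("google/byt5-small")
#   ids = tok("Unicode €.")["input_ids"]
#   assert ids[0] == ord("U") + 3   # 88, as asserted in test_multibytes_char
#   assert ids[-1] == 1             # </s> appended automatically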
| 176 | 1 |
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """
    Choose a random pivot from the list.
    """
    return choice(lst)
def kth_number(lst: list[int], k: int) -> int:
    """
    Return the k-th smallest element of lst (1-indexed).
    """
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
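# Worked example (assumes distinct elements; values equal to the pivot, other
# than the pivot itself, are dropped by the partition above):
#   kth_number([7, 10, 4, 3, 20, 15], 3) -> 7
# e.g. with pivot 10: small = [7, 4, 3], big = [20, 15]; len(small) = 3 > k - 1,
# so the call recurses into small with k = 3 and eventually returns 7.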
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 704 |
"""simple docstring"""
import math
def solution(n: int = 100) -> int:
    """
    Returns the difference between the square of the sum and the sum of the
    squares of the first n natural numbers.
    """
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
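# Worked example: for n = 10 the square of the sum is 55**2 = 3025, the sum of
# the squares is 385, and solution(10) returns 3025 - 385 = 2640.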
if __name__ == "__main__":
print(f'''{solution() = }''')
| 645 | 0 |
UNIT_SYMBOL = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 1_2,
'Pm': 1_5,
'Em': 1_8,
'Zm': 2_1,
'Ym': 2_4,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """
    Convert a length from one metric unit to another.
    """
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
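# Worked example: converting 4 kilometers to megametres scales by 10 ** (3 - 6),
# so length_conversion(4, "kilometer", "megametre") == 0.004.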
if __name__ == "__main__":
from doctest import testmod
testmod()
| 246 |
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """
    Rising product u (u - 1) (u - 2) ... used by Newton's forward interpolation.
    """
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
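# ucal(u, p) is the rising product u(u - 1)...(u - p + 1) that multiplies the
# p-th forward difference in Newton's formula
#   f(x) ~= y0 + u*dy0 + u(u - 1)/2! * d2y0 + u(u - 1)(u - 2)/3! * d3y0 + ...
# with u = (x - x0) / h. For example, ucal(0.5, 3) = 0.5 * (-0.5) * (-1.5) = 0.375.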
def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
    summ = 0
    print("enter the values of parameters in a list: ")
    x = list(map(float, input().split()))
    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
| 246 | 1 |
'''simple docstring'''
import math
import sys
import cv2
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # apply the gaussian function elementwise
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # create a gaussian kernel of the given dimension from the distance to the center
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    imga = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            imga[i, j] = val
    return imga


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # force an odd kernel
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
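# A minimal programmatic usage sketch (synthetic input; the cv2 GUI calls in the
# __main__ block below are only needed for the CLI path):
#   noisy = np.random.rand(64, 64).astype("float32")
#   smoothed = bilateral_filter(noisy, spatial_variance=1.0,
#                               intensity_variance=1.0, kernel_size=5)
# Pixels within kernel_size // 2 of the border are left at 0 by the loop bounds.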
if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)
    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
| 58 |
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 58 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 458 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100OnnxConfig"""],
"""tokenization_m2m_100""": ["""M2M100Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
"""M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""M2M100ForConditionalGeneration""",
"""M2M100Model""",
"""M2M100PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 458 | 1 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
A: Optional[int] = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
A: Tuple = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
A: str = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , reference_urls=[] , )
    def _compute(
        self, predictions, references, regexes_to_ignore=None, ignore_case=False, ignore_punctuation=False, ignore_numbers=False
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)
        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 7 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
lowercase_ : Tuple = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
    def test_diffusers_model_is_compatible(self):
lowercase_ : Tuple = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
    def test_diffusers_model_is_not_compatible(self):
lowercase_ : int = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(_lowercase ) )
    def test_transformer_model_is_compatible(self):
lowercase_ : Dict = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
    def test_transformer_model_is_not_compatible(self):
lowercase_ : List[Any] = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(_lowercase ) )
    def test_all_is_compatible_variant(self):
lowercase_ : str = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Union[str, Any] = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
    def test_diffusers_model_is_compatible_variant(self):
lowercase_ : Optional[int] = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
    def test_diffusers_model_is_compatible_variant_partial(self):
# pass variant but use the non-variant filenames
lowercase_ : Optional[int] = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
    def test_diffusers_model_is_not_compatible_variant(self):
lowercase_ : int = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertFalse(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
    def test_transformer_model_is_compatible_variant(self):
lowercase_ : str = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
lowercase_ : str = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
    def test_transformer_model_is_compatible_variant_partial(self):
# pass variant but use the non-variant filenames
lowercase_ : List[Any] = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
    def test_transformer_model_is_not_compatible_variant(self):
lowercase_ : Union[str, Any] = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
# 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertFalse(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
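    # The invariant these cases exercise: is_safetensors_compatible should return
    # True only when every torch (.bin) weight file in the repo listing has a
    # .safetensors counterpart, preferring variant files (e.g. *.fp16.safetensors)
    # when a variant is requested and falling back to the non-variant names
    # otherwise. A minimal failing sketch:
    #   is_safetensors_compatible(["unet/diffusion_pytorch_model.bin"])  # False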
| 7 | 1 |
"""simple docstring"""
def solution():
    """
    Returns the number of Sundays that fell on the first of the month during
    the twentieth century (1 Jan 1901 to 31 Dec 2000).
    """
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
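# Why `day` starts at 6: 6 January 1901 was the first Sunday of the century, and
# each loop iteration advances exactly one week, so `day` always holds the
# day-of-month of a Sunday; landing on 1 counts it. For 1901-2000 this gives 171.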
if __name__ == "__main__":
print(solution())
| 83 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
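# A minimal usage sketch of the config above (the attribute_map makes the generic
# names resolve to the CTRL-specific ones):
#   config = CTRLConfig(n_layer=2, n_head=4)  # a tiny variant for testing
#   assert config.num_hidden_layers == 2      # routed to n_layer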
| 627 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 711 |
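# How the lazy-module pattern above behaves (a sketch; the module path follows
# the usual transformers layout): importing the package is cheap, and the heavy
# torch-backed symbols are only materialized on first attribute access:
#   from transformers.models.informer import InformerConfig  # no torch import yet
#   from transformers.models.informer import InformerModel   # triggers the modeling import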
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
    def test_small_model_tf(self):
_UpperCAmelCase : List[str] = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" )
_UpperCAmelCase : Union[str, Any] = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(A_ , decimals=6 ) , [
{"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
] , )
_UpperCAmelCase : Optional[int] = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(A_ , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-05,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-05,
"token": 25506,
"token_str": " accuser",
},
] , )
_UpperCAmelCase : Optional[Any] = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(A_ , decimals=6 ) , [
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
] , )
@require_torch
    def test_small_model_pt(self):
_UpperCAmelCase : Optional[Any] = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" )
_UpperCAmelCase : List[str] = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(A_ , decimals=6 ) , [
{"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase : str = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(A_ , decimals=6 ) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase : List[Any] = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(A_ , decimals=6 ) , [
{"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
] , )
_UpperCAmelCase : Optional[Any] = unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=6 ) , [
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
    def test_fp16_casting(self):
_UpperCAmelCase : Union[str, Any] = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" )
# convert model to fp16
pipe.model.half()
_UpperCAmelCase : int = pipe("Paris is the [MASK] of France." )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(A_ , A_ )
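    # For reference, a typical real-world call of the pipeline exercised above
    # (model name illustrative; scores depend on the checkpoint):
    #   unmasker = pipeline("fill-mask", model="distilroberta-base")
    #   unmasker(f"Paris is the {unmasker.tokenizer.mask_token} of France.", top_k=1)
    #   # -> [{"sequence": ..., "score": ..., "token": ..., "token_str": ...}]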
@slow
@require_torch
    def test_large_model_pt(self):
_UpperCAmelCase : List[Any] = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )
self.run_large_test(A_ )
@slow
@require_tf
    def test_large_model_tf(self):
_UpperCAmelCase : List[str] = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )
self.run_large_test(A_ )
    def run_large_test(self, unmasker):
_UpperCAmelCase : int = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(A_ ) , [
{"sequence": "My name is John", "score": 0.0_08, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.0_07, "token": 1573, "token_str": " Chris"},
] , )
_UpperCAmelCase : Optional[int] = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(A_ ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.2_51,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.2_14,
"token": 12790,
"token_str": " Lyon",
},
] , )
_UpperCAmelCase : Any = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(A_ ) , [
{"sequence": "My name is Patrick", "score": 0.0_05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.0_00, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.0_00, "token": 2941, "token_str": " Te"},
] , )
@require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
@require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" )
_UpperCAmelCase : Tuple = FillMaskPipeline(model=A_ , tokenizer=A_ )
_UpperCAmelCase : Tuple = [
f'This is another {tokenizer.mask_token} test',
]
return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
_UpperCAmelCase : Dict = fill_masker.tokenizer
_UpperCAmelCase : Tuple = fill_masker.model
_UpperCAmelCase : Union[str, Any] = fill_masker(
f'This is a {tokenizer.mask_token}' , )
self.assertEqual(
A_ , [
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
] , )
_UpperCAmelCase : Optional[Any] = fill_masker([f'This is a {tokenizer.mask_token}'] )
self.assertEqual(
A_ , [
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
] , )
_UpperCAmelCase : List[str] = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'] )
self.assertEqual(
A_ , [
[
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
],
[
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
],
] , )
with self.assertRaises(A_ ):
fill_masker([None] )
# Inputs without a mask token are not supported
with self.assertRaises(A_ ):
fill_masker("This is" )
self.run_test_top_k(A_ , A_ )
self.run_test_targets(A_ , A_ )
self.run_test_top_k_targets(A_ , A_ )
self.fill_mask_with_duplicate_targets_and_top_k(A_ , A_ )
self.fill_mask_with_multiple_masks(A_ , A_ )
def _UpperCAmelCase ( self , model , tokenizer ):
'''simple docstring'''
_UpperCAmelCase : Any = tokenizer.get_vocab()
_UpperCAmelCase : int = sorted(vocab.keys() )[:2]
# Pipeline argument
_UpperCAmelCase : Any = FillMaskPipeline(model=A_ , tokenizer=A_ , targets=A_ )
_UpperCAmelCase : Optional[int] = fill_masker(f'This is a {tokenizer.mask_token}' )
self.assertEqual(
A_ , [
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
] , )
_UpperCAmelCase : Tuple = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , A_ )
_UpperCAmelCase : List[Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(A_ ) )
# Call argument
_UpperCAmelCase : Union[str, Any] = FillMaskPipeline(model=A_ , tokenizer=A_ )
_UpperCAmelCase : List[Any] = fill_masker(f'This is a {tokenizer.mask_token}' , targets=A_ )
self.assertEqual(
A_ , [
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
] , )
_UpperCAmelCase : Optional[Any] = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , A_ )
_UpperCAmelCase : Any = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(A_ ) )
# Score equivalence
_UpperCAmelCase : int = fill_masker(f'This is a {tokenizer.mask_token}' , targets=A_ )
_UpperCAmelCase : Tuple = [top_mask["token_str"] for top_mask in outputs]
_UpperCAmelCase : Union[str, Any] = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(A_ ) == set(A_ ):
_UpperCAmelCase : List[str] = fill_masker(f'This is a {tokenizer.mask_token}' , targets=A_ )
_UpperCAmelCase : List[str] = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(A_ ) , nested_simplify(A_ ) )
# Raises with invalid
with self.assertRaises(A_ ):
_UpperCAmelCase : Optional[int] = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(A_ ):
_UpperCAmelCase : Dict = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""] )
with self.assertRaises(A_ ):
_UpperCAmelCase : List[str] = fill_masker(f'This is a {tokenizer.mask_token}' , targets="" )
def _UpperCAmelCase ( self , model , tokenizer ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = FillMaskPipeline(model=A_ , tokenizer=A_ , top_k=2 )
_UpperCAmelCase : Union[str, Any] = fill_masker(f'This is a {tokenizer.mask_token}' )
self.assertEqual(
A_ , [
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
] , )
_UpperCAmelCase : int = FillMaskPipeline(model=A_ , tokenizer=A_ )
_UpperCAmelCase : Optional[Any] = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 )
self.assertEqual(
A_ , [
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
] , )
self.assertEqual(nested_simplify(A_ ) , nested_simplify(A_ ) )
def _UpperCAmelCase ( self , model , tokenizer ):
'''simple docstring'''
_UpperCAmelCase : int = tokenizer.get_vocab()
_UpperCAmelCase : Union[str, Any] = FillMaskPipeline(model=A_ , tokenizer=A_ )
# top_k=2, ntargets=3
_UpperCAmelCase : Tuple = sorted(vocab.keys() )[:3]
_UpperCAmelCase : Union[str, Any] = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 , targets=A_ )
# If we use the most probable targets, and filter differently, we should still
# have the same results
_UpperCAmelCase : Tuple = [el["token_str"] for el in sorted(A_ , key=lambda x : x["score"] , reverse=True )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(A_ ).issubset(A_ ):
_UpperCAmelCase : int = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=3 , targets=A_ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(A_ ) , nested_simplify(A_ ) )
def _UpperCAmelCase ( self , model , tokenizer ):
'''simple docstring'''
_UpperCAmelCase : Tuple = FillMaskPipeline(model=A_ , tokenizer=A_ )
_UpperCAmelCase : Dict = tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCAmelCase : Dict = sorted(vocab.keys() )[:3]
_UpperCAmelCase : Tuple = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_UpperCAmelCase : Dict = fill_masker(f'My name is {tokenizer.mask_token}' , targets=A_ , top_k=10 )
# The target list contains duplicates, so we can't output more
# results than there are unique targets
self.assertEqual(len(A_ ) , 3 )
def _UpperCAmelCase ( self , model , tokenizer ):
'''simple docstring'''
_UpperCAmelCase : int = FillMaskPipeline(model=A_ , tokenizer=A_ )
_UpperCAmelCase : List[str] = fill_masker(
f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2 )
self.assertEqual(
A_ , [
[
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
],
[
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
],
[
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
],
] , )
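# Hedged, standalone usage sketch of the `targets`/`top_k` interaction the tests
# above verify; the model id matches the large tests, everything else is
# illustrative.
def _example_fill_mask_targets():
    from transformers import pipeline
    unmasker = pipeline(task="fill-mask", model="distilroberta-base")
    # With BPE vocabularies the leading space is part of the token, hence " Patrick".
    outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara"], top_k=2)
    for out in outputs:
        # `targets` only filters the candidates; scores are not renormalized, which
        # is exactly what the score-equivalence check above relies on.
        print(out["token_str"], round(out["score"], 4))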
| 467 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
_snake_case =StableDiffusionInstructPixaPixPipeline
_snake_case =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
_snake_case =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_snake_case =IMAGE_TO_IMAGE_IMAGE_PARAMS
_snake_case =IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase__ ( self: Tuple ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
UpperCAmelCase_ =PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
torch.manual_seed(0 )
UpperCAmelCase_ =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase_ =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
UpperCAmelCase_ =CLIPTextModel(_lowerCAmelCase )
UpperCAmelCase_ =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ ={
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def lowerCAmelCase__ ( self: Optional[int] , device: Any , seed: Tuple=0 ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
UpperCAmelCase_ =image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ =Image.fromarray(np.uinta(_lowerCAmelCase ) ).convert("RGB" )
if str(_lowerCAmelCase ).startswith("mps" ):
UpperCAmelCase_ =torch.manual_seed(_lowerCAmelCase )
else:
UpperCAmelCase_ =torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
UpperCAmelCase_ ={
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"image_guidance_scale": 1,
"output_type": "numpy",
}
return inputs
def lowerCAmelCase__ ( self: Optional[int] ) -> str:
'''simple docstring'''
UpperCAmelCase_ ="cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ =self.get_dummy_components()
UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase )
UpperCAmelCase_ =sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =self.get_dummy_inputs(_lowerCAmelCase )
UpperCAmelCase_ =sd_pipe(**_lowerCAmelCase ).images
UpperCAmelCase_ =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ =np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase__ ( self: Any ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ ="cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ =self.get_dummy_components()
UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase )
UpperCAmelCase_ =sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =self.get_dummy_inputs(_lowerCAmelCase )
UpperCAmelCase_ ="french fries"
UpperCAmelCase_ =sd_pipe(**_lowerCAmelCase , negative_prompt=_lowerCAmelCase )
UpperCAmelCase_ =output.images
UpperCAmelCase_ =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ =np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase__ ( self: Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ ="cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ =self.get_dummy_components()
UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase )
UpperCAmelCase_ =sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =self.get_dummy_inputs(_lowerCAmelCase )
UpperCAmelCase_ =[inputs["prompt"]] * 2
UpperCAmelCase_ =np.array(inputs["image"] ).astype(np.floataa ) / 2_55.0
UpperCAmelCase_ =torch.from_numpy(_lowerCAmelCase ).unsqueeze(0 ).to(_lowerCAmelCase )
UpperCAmelCase_ =image / 2 + 0.5
UpperCAmelCase_ =image.permute(0 , 3 , 1 , 2 )
UpperCAmelCase_ =image.repeat(2 , 1 , 1 , 1 )
UpperCAmelCase_ =sd_pipe(**_lowerCAmelCase ).images
UpperCAmelCase_ =image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
UpperCAmelCase_ =np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase__ ( self: Union[str, Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ ="cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ =self.get_dummy_components()
UpperCAmelCase_ =EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" )
UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase )
UpperCAmelCase_ =sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =self.get_dummy_inputs(_lowerCAmelCase )
UpperCAmelCase_ =sd_pipe(**_lowerCAmelCase ).images
UpperCAmelCase_ =image[0, -3:, -3:, -1]
rounded_slice =[round(x , 4 ) for x in image_slice.flatten().tolist()]
print(",".join([str(x ) for x in rounded_slice] ) )
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ =np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase__ ( self: Optional[int] ) -> Optional[Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowerCAmelCase__ ( self: str ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =self.get_dummy_components()
UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase )
UpperCAmelCase_ =VaeImageProcessor(do_resize=_lowerCAmelCase , do_normalize=_lowerCAmelCase )
UpperCAmelCase_ =pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =pipe(**self.get_dummy_inputs_by_type(_lowerCAmelCase , input_image_type="pt" ) )[0]
UpperCAmelCase_ =components["vae"]
UpperCAmelCase_ =self.get_dummy_inputs_by_type(_lowerCAmelCase , input_image_type="pt" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
UpperCAmelCase_ =vae.encode(inputs[image_param] ).latent_dist.mode()
UpperCAmelCase_ =pipe(**_lowerCAmelCase )[0]
UpperCAmelCase_ =np.abs(out - out_latents_inputs ).max()
self.assertLess(_lowerCAmelCase , 1e-4 , "passing latents as image input generate different result from passing image" )
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def lowerCAmelCase__ ( self: Dict ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: List[Any] , _lowerCAmelCase: Tuple=0 ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =torch.manual_seed(_lowerCAmelCase )
UpperCAmelCase_ =load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" )
UpperCAmelCase_ ={
"prompt": "turn him into a cyborg",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"image_guidance_scale": 1.0,
"output_type": "numpy",
}
return inputs
def lowerCAmelCase__ ( self: Optional[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
UpperCAmelCase_ =self.get_inputs()
UpperCAmelCase_ =pipe(**_lowerCAmelCase ).images
UpperCAmelCase_ =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ =np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowerCAmelCase__ ( self: Optional[int] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=_lowerCAmelCase )
UpperCAmelCase_ =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
UpperCAmelCase_ =self.get_inputs()
UpperCAmelCase_ =pipe(**_lowerCAmelCase ).images
UpperCAmelCase_ =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ =np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowerCAmelCase__ ( self: Dict ) -> str:
'''simple docstring'''
UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=_lowerCAmelCase )
UpperCAmelCase_ =DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
UpperCAmelCase_ =self.get_inputs()
UpperCAmelCase_ =pipe(**_lowerCAmelCase ).images
UpperCAmelCase_ =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ =np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowerCAmelCase__ ( self: Dict ) -> Optional[int]:
'''simple docstring'''
number_of_steps =0
def callback_fn(step: int , timestep: int , latents: torch.FloatTensor ) -> None:
callback_fn.has_been_called =True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
UpperCAmelCase_ =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
UpperCAmelCase_ =latents[0, -3:, -3:, -1]
UpperCAmelCase_ =np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
UpperCAmelCase_ =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
UpperCAmelCase_ =latents[0, -3:, -3:, -1]
UpperCAmelCase_ =np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
callback_fn.has_been_called =False
UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=_lowerCAmelCase , torch_dtype=torch.floataa )
UpperCAmelCase_ =pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
UpperCAmelCase_ =self.get_inputs()
pipe(**_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Dict:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=_lowerCAmelCase , torch_dtype=torch.floataa )
UpperCAmelCase_ =pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_ =self.get_inputs()
UpperCAmelCase_ =pipe(**_lowerCAmelCase )
UpperCAmelCase_ =torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def lowerCAmelCase__ ( self: Dict ) -> str:
'''simple docstring'''
UpperCAmelCase_ =self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
UpperCAmelCase_ =inputs["image"].resize((504, 504) )
UpperCAmelCase_ ="timbrooks/instruct-pix2pix"
UpperCAmelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
_lowerCAmelCase , safety_checker=_lowerCAmelCase , )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
UpperCAmelCase_ =pipe(**_lowerCAmelCase )
UpperCAmelCase_ =output.images[0]
UpperCAmelCase_ =image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
UpperCAmelCase_ =np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
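# Hedged end-to-end sketch of the pipeline exercised by the slow tests above; it
# relies on the imports at the top of this file, the checkpoint id matches the
# tests, and the prompt and step count are illustrative only.
def _example_instruct_pix2pix():
    pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
        "timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None
    )
    pipe.to("cuda")
    image = load_image(
        "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
    )
    # image_guidance_scale > 1 pushes the output closer to the input image.
    return pipe(
        "turn him into a cyborg", image=image, num_inference_steps=10, image_guidance_scale=1.0
    ).images[0]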
| 54 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def A ( config , base_model=False ):
'''simple docstring'''
_lowerCAmelCase : str = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"deit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"deit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"deit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"deit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"deit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"deit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"deit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"deit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"deit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"deit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
_lowerCAmelCase : Any = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def A ( state_dict , config , base_model=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCAmelCase : str = ""
else:
_lowerCAmelCase : Optional[int] = "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCAmelCase : List[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
_lowerCAmelCase : Union[str, Any] = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase : Any = in_proj_weight[
: config.hidden_size, :
]
_lowerCAmelCase : Tuple = in_proj_bias[: config.hidden_size]
_lowerCAmelCase : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCAmelCase : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCAmelCase : Optional[int] = in_proj_weight[
-config.hidden_size :, :
]
_lowerCAmelCase : Dict = in_proj_bias[-config.hidden_size :]
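# The timm checkpoint stores attention as a single fused qkv matrix of shape
# (3 * hidden_size, hidden_size); the slices above peel off q, k and v in that
# order. Minimal shape check (hypothetical hidden_size, for illustration only):
def _check_qkv_split(hidden_size=8):
    in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
    q = in_proj_weight[:hidden_size, :]
    k = in_proj_weight[hidden_size : hidden_size * 2, :]
    v = in_proj_weight[-hidden_size:, :]
    assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)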
def A ( dct , old , new ):
'''simple docstring'''
val = dct.pop(old )
dct[new] = val
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCAmelCase : List[Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def A ( deit_name , pytorch_dump_folder_path ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = DeiTConfig()
# all deit models have fine-tuned heads
_lowerCAmelCase : Dict = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
_lowerCAmelCase : Optional[int] = 1_000
_lowerCAmelCase : Optional[Any] = "huggingface/label-files"
_lowerCAmelCase : Tuple = "imagenet-1k-id2label.json"
_lowerCAmelCase : Optional[int] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
_lowerCAmelCase : str = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
_lowerCAmelCase : Union[str, Any] = idalabel
_lowerCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
_lowerCAmelCase : Dict = int(deit_name[-6:-4] )
_lowerCAmelCase : Any = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("tiny" ):
_lowerCAmelCase : List[str] = 192
_lowerCAmelCase : int = 768
_lowerCAmelCase : int = 12
_lowerCAmelCase : Any = 3
elif deit_name[9:].startswith("small" ):
_lowerCAmelCase : int = 384
_lowerCAmelCase : Tuple = 1_536
_lowerCAmelCase : Dict = 12
_lowerCAmelCase : Any = 6
if deit_name[9:].startswith("base" ):
pass
elif deit_name[4:].startswith("large" ):
_lowerCAmelCase : List[str] = 1_024
_lowerCAmelCase : Tuple = 4_096
_lowerCAmelCase : Optional[Any] = 24
_lowerCAmelCase : str = 16
# load original model from timm
_lowerCAmelCase : Any = timm.create_model(_lowerCamelCase , pretrained=_lowerCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCAmelCase : Optional[Any] = timm_model.state_dict()
_lowerCAmelCase : Optional[int] = create_rename_keys(_lowerCamelCase , _lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# load HuggingFace model
_lowerCAmelCase : Any = DeiTForImageClassificationWithTeacher(_lowerCamelCase ).eval()
model.load_state_dict(_lowerCamelCase )
# Check outputs on an image, prepared by DeiTImageProcessor
_lowerCAmelCase : Optional[int] = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
_lowerCAmelCase : Any = DeiTImageProcessor(size=_lowerCamelCase , crop_size=config.image_size )
_lowerCAmelCase : str = image_processor(images=prepare_img() , return_tensors="pt" )
_lowerCAmelCase : int = encoding["pixel_values"]
_lowerCAmelCase : int = model(_lowerCamelCase )
_lowerCAmelCase : Tuple = timm_model(_lowerCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCamelCase , outputs.logits , atol=1e-3 )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F"Saving model {deit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_snake_case = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
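# Hedged invocation example (the script filename is an assumption):
#   python convert_deit_checkpoint.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled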
| 500 | 0 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def __UpperCAmelCase ( ) -> str:
"""simple docstring"""
_a : str = '''https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'''
_a : Tuple = Image.open(requests.get(__a ,stream=__a ).raw ).convert('''RGB''' )
return image
def __UpperCAmelCase ( config : Tuple ) -> List[str]:
"""simple docstring"""
_a : Tuple = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.embeddings.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.embeddings.layernorm.bias''') )
# fmt: on
return rename_keys
def __UpperCAmelCase ( dct : Dict ,old : List[Any] ,new : Any ) -> Dict:
"""simple docstring"""
val = dct.pop(old )
dct[new] = val
def __UpperCAmelCase ( state_dict : Union[str, Any] ,config : Dict ) -> Optional[int]:
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
q_bias = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" )
v_bias = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" )
# next, set bias in the state dict
qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias ,requires_grad=False ), v_bias) )
_a : Optional[int] = qkv_bias
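# Note: the ViT used by BLIP-2/InstructBLIP learns no bias for the key projection,
# which is why a zero tensor is spliced between the q and v biases above to match
# the fused qkv layout expected on the HF side.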
def __UpperCAmelCase ( __a : Tuple ) -> List[Any]:
"""simple docstring"""
_a : int = 364 if '''coco''' in model_name else 224
_a : str = InstructBlipVisionConfig(image_size=__a ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
_a : Optional[Any] = TaConfig.from_pretrained('''google/flan-t5-xl''' ,dense_act_fn='''gelu''' ,bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
_a : str = TaConfig.from_pretrained('''google/flan-t5-xxl''' ,dense_act_fn='''gelu''' ,bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
_a : int = LlamaConfig.from_pretrained('''decapoda-research/llama-7b-hf''' ,vocab_size=32_001 ).to_dict()
elif "vicuna-13b" in model_name:
_a : int = LlamaConfig.from_pretrained('''decapoda-research/llama-13b-hf''' ,vocab_size=32_001 ).to_dict()
else:
raise ValueError('''Model name not supported''' )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
_a : List[str] = InstructBlipQFormerConfig(vocab_size=30_523 ).to_dict()
_a : List[Any] = InstructBlipConfig(vision_config=__a ,text_config=__a ,qformer_config=__a )
return config, image_size
@torch.no_grad()
def __UpperCAmelCase ( model_name : Any ,pytorch_dump_folder_path : Tuple=None ,push_to_hub : List[str]=False ) -> str:
"""simple docstring"""
_a : List[Any] = AutoTokenizer.from_pretrained('''bert-base-uncased''' ,truncation_side='''left''' )
qformer_tokenizer.add_special_tokens({'''bos_token''': '''[DEC]'''} )
if "t5" in model_name:
_a : Any = TaTokenizerFast.from_pretrained('''google/flan-t5-xl''' ,truncation_side='''left''' )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
_a : Optional[Any] = LlamaTokenizerFast.from_pretrained(
'''huggyllama/llama-7b''' ,truncation_side='''left''' ,bos_token='''</s>''' ,unk_token='''</s>''' )
tokenizer.add_special_tokens({'''pad_token''': '''[PAD]'''} )
config , image_size = get_blipa_config(model_name )
hf_model = InstructBlipForConditionalGeneration(config ).eval()
_a : Optional[int] = {
'''instructblip-vicuna-7b''': ('''blip2_vicuna_instruct''', '''vicuna7b'''),
'''instructblip-vicuna-13b''': ('''blip2_vicuna_instruct''', '''vicuna13b'''),
'''instructblip-flan-t5-xl''': ('''blip2_t5_instruct''', '''flant5xl'''),
'''instructblip-flan-t5-xxl''': ('''blip2_t5_instruct''', '''flant5xxl'''),
}
name , type = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
_a : List[str] = '''cuda:1''' if torch.cuda.is_available() else '''cpu'''
_a : Union[str, Any] = '''cuda:2''' if torch.cuda.is_available() else '''cpu'''
original_model , vis_processors , _ = load_model_and_preprocess(
name=__a ,model_type=__a ,is_eval=__a ,device=__a )
original_model.eval()
print('''Done!''' )
# update state dict keys
_a : Any = original_model.state_dict()
_a : Optional[Any] = create_rename_keys(__a )
for src, dest in rename_keys:
rename_key(__a ,__a ,__a )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_a : str = state_dict.pop(__a )
if key.startswith('''Qformer.bert''' ):
_a : List[Any] = key.replace('''Qformer.bert''' ,'''qformer''' )
if "attention.self" in key:
_a : Tuple = key.replace('''self''' ,'''attention''' )
if "llm_proj" in key:
_a : Union[str, Any] = key.replace('''llm_proj''' ,'''language_projection''' )
if "t5_proj" in key:
_a : int = key.replace('''t5_proj''' ,'''language_projection''' )
if key.startswith('''llm_model''' ):
_a : List[str] = key.replace('''llm_model''' ,'''language_model''' )
if key.startswith('''t5''' ):
_a : List[str] = key.replace('''t5''' ,'''language''' )
_a : Union[str, Any] = val
# read in qv biases
read_in_q_v_bias(__a ,__a )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(__a ,strict=__a )
_a : Optional[Any] = load_demo_image()
_a : Union[str, Any] = '''What is unusual about this image?'''
# create processor
_a : Tuple = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} ,image_mean=__a ,image_std=__a )
_a : Dict = InstructBlipProcessor(
image_processor=__a ,tokenizer=__a ,qformer_tokenizer=__a ,)
_a : str = processor(images=__a ,text=__a ,return_tensors='''pt''' ).to(__a )
# make sure processor creates exact same pixel values
_a : Optional[Any] = vis_processors['''eval'''](__a ).unsqueeze(0 ).to(__a )
_a : List[Any] = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) ,__a )
original_model.to(__a )
hf_model.to(__a )
with torch.no_grad():
if "vicuna" in model_name:
_a : Any = original_model({'''image''': original_pixel_values, '''text_input''': [prompt]} ).logits
_a : Optional[int] = hf_model(**__a ).logits
else:
_a : Optional[int] = original_model(
{'''image''': original_pixel_values, '''text_input''': [prompt], '''text_output''': ['''\n''']} ).logits
_a : Optional[Any] = tokenizer('''\n''' ,return_tensors='''pt''' ).input_ids.to(__a )
_a : int = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id ,-100 )
_a : int = hf_model(**__a ,labels=__a ).logits
print('''First values of original logits:''' ,original_logits[0, :3, :3] )
print('''First values of HF logits:''' ,logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
_a : Optional[Any] = 1E-4 if '''vicuna''' in model_name else 1E-5
assert torch.allclose(original_logits.to(logits.device ) ,__a ,atol=__a )
print('''Looks ok!''' )
print('''Generating with original model...''' )
_a : Union[str, Any] = original_model.generate({'''image''': original_pixel_values, '''prompt''': prompt} ,num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print('''Generating with HF model...''' )
_a : Optional[int] = hf_model.generate(
**__a ,do_sample=__a ,num_beams=5 ,max_length=256 ,min_length=1 ,top_p=0.9 ,repetition_penalty=1.5 ,length_penalty=1.0 ,temperature=1 ,)
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
_a : Optional[Any] = 2
print('''Original generation:''' ,__a )
_a : Dict = processor.batch_decode(__a ,skip_special_tokens=__a )
_a : Optional[int] = [text.strip() for text in output_text]
print('''HF generation:''' ,__a )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__a )
hf_model.save_pretrained(__a )
if push_to_hub:
processor.push_to_hub(F"""Salesforce/{model_name}""" )
hf_model.push_to_hub(F"""Salesforce/{model_name}""" )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
a__ = [
'''instructblip-vicuna-7b''',
'''instructblip-vicuna-13b''',
'''instructblip-flan-t5-xl''',
'''instructblip-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''instructblip-flan-t5-xl''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
a__ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
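# Hedged post-conversion smoke test (the folder path is an assumption; any folder
# written by the conversion above works):
def _example_load_converted(folder="./instructblip-flan-t5-xl"):
    processor = InstructBlipProcessor.from_pretrained(folder)
    model = InstructBlipForConditionalGeneration.from_pretrained(folder)
    return processor, model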
| 578 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'''
)
a__ = None
a__ = {
'''7B''': 11008,
'''13B''': 13824,
'''30B''': 17920,
'''65B''': 22016,
'''70B''': 28672,
}
a__ = {
'''7B''': 1,
'''7Bf''': 1,
'''13B''': 2,
'''13Bf''': 2,
'''30B''': 4,
'''65B''': 8,
'''70B''': 8,
'''70Bf''': 8,
}
def __UpperCAmelCase ( n : int ,ffn_dim_multiplier : Optional[int]=1 ,multiple_of : Any=256 ) -> Optional[Any]:
"""simple docstring"""
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
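# Worked example of the rounding above: with n=4096, ffn_dim_multiplier=1 and
# multiple_of=256, int(8 * 4096 / 3) = 10922, and ((10922 + 255) // 256) * 256
# rounds up to 11008 -- the 7B intermediate size listed in the table above.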
def __UpperCAmelCase ( __a : Any ) -> Any:
"""simple docstring"""
with open(__a ,'''r''' ) as f:
return json.load(f )
def __UpperCAmelCase ( text : List[Any] ,path : Any ) -> int:
"""simple docstring"""
with open(path ,'''w''' ) as f:
json.dump(text ,f )
def __UpperCAmelCase ( model_path : Optional[int] ,input_base_path : Optional[Any] ,model_size : int ,safe_serialization : Any=True ) -> Union[str, Any]:
"""simple docstring"""
os.makedirs(__a ,exist_ok=__a )
_a : Optional[Any] = os.path.join(__a ,'''tmp''' )
os.makedirs(__a ,exist_ok=__a )
params = read_json(os.path.join(input_base_path ,'''params.json''' ) )
num_shards = NUM_SHARDS[model_size]
n_layers = params['''n_layers''']
n_heads = params['''n_heads''']
n_heads_per_shard = n_heads // num_shards
dim = params['''dim''']
dims_per_head = dim // n_heads
base = 1_00_00.0
inv_freq = 1.0 / (base ** (torch.arange(0 ,dims_per_head ,2 ).float() / dims_per_head))
if "n_kv_heads" in params:
_a : str = params['''n_kv_heads'''] # for GQA / MQA
_a : Union[str, Any] = n_heads_per_shard // num_key_value_heads
_a : List[str] = dim // num_key_value_heads
else: # compatibility with other checkpoints
_a : Optional[Any] = n_heads
_a : Union[str, Any] = n_heads_per_shard
_a : List[Any] = dim
# permute for sliced rotary
def permute(w ,n_heads=n_heads ,dima=dim ,dimb=dim ):
return w.view(n_heads ,dima // n_heads // 2 ,2 ,dimb ).transpose(1 ,2 ).reshape(dima ,dimb )
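# The permutation converts the checkpoint's interleaved rotary layout (rotary pairs
# stored in adjacent rows within each head) into the half-split layout HF's LLaMA
# rotary embedding expects: view -> transpose(1, 2) -> reshape swaps the pair axis
# out of each head before flattening back to (dima, dimb).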
print(F"""Fetching all parameters from the checkpoint at {input_base_path}.""" )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
_a : Any = torch.load(os.path.join(__a ,'''consolidated.00.pth''' ) ,map_location='''cpu''' )
else:
# Sharded
_a : Tuple = [
torch.load(os.path.join(__a ,F"""consolidated.{i:02d}.pth""" ) ,map_location='''cpu''' )
for i in range(__a )
]
_a : List[Any] = 0
_a : Optional[int] = {'''weight_map''': {}}
for layer_i in range(__a ):
_a : Any = F"""pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
_a : List[str] = {
F"""model.layers.{layer_i}.self_attn.q_proj.weight""": permute(
loaded[F"""layers.{layer_i}.attention.wq.weight"""] ),
F"""model.layers.{layer_i}.self_attn.k_proj.weight""": permute(
loaded[F"""layers.{layer_i}.attention.wk.weight"""] ),
F"""model.layers.{layer_i}.self_attn.v_proj.weight""": loaded[F"""layers.{layer_i}.attention.wv.weight"""],
F"""model.layers.{layer_i}.self_attn.o_proj.weight""": loaded[F"""layers.{layer_i}.attention.wo.weight"""],
F"""model.layers.{layer_i}.mlp.gate_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w1.weight"""],
F"""model.layers.{layer_i}.mlp.down_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w2.weight"""],
F"""model.layers.{layer_i}.mlp.up_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w3.weight"""],
F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[F"""layers.{layer_i}.attention_norm.weight"""],
F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[F"""layers.{layer_i}.ffn_norm.weight"""],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
_a : int = {
F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[0][
F"""layers.{layer_i}.attention_norm.weight"""
].clone(),
F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[0][
F"""layers.{layer_i}.ffn_norm.weight"""
].clone(),
}
_a : Optional[int] = permute(
torch.cat(
[
loaded[i][F"""layers.{layer_i}.attention.wq.weight"""].view(__a ,__a ,__a )
for i in range(__a )
] ,dim=0 ,).reshape(__a ,__a ) )
_a : int = permute(
torch.cat(
[
loaded[i][F"""layers.{layer_i}.attention.wk.weight"""].view(
__a ,__a ,__a )
for i in range(__a )
] ,dim=0 ,).reshape(__a ,__a ) ,__a ,__a ,__a ,)
_a : List[Any] = torch.cat(
[
loaded[i][F"""layers.{layer_i}.attention.wv.weight"""].view(
__a ,__a ,__a )
for i in range(__a )
] ,dim=0 ,).reshape(__a ,__a )
_a : Dict = torch.cat(
[loaded[i][F"""layers.{layer_i}.attention.wo.weight"""] for i in range(__a )] ,dim=1 )
_a : Optional[Any] = torch.cat(
[loaded[i][F"""layers.{layer_i}.feed_forward.w1.weight"""] for i in range(__a )] ,dim=0 )
_a : str = torch.cat(
[loaded[i][F"""layers.{layer_i}.feed_forward.w2.weight"""] for i in range(__a )] ,dim=1 )
_a : Dict = torch.cat(
[loaded[i][F"""layers.{layer_i}.feed_forward.w3.weight"""] for i in range(__a )] ,dim=0 )
_a : Any = inv_freq
for k, v in state_dict.items():
_a : Optional[int] = filename
param_count += v.numel()
torch.save(__a ,os.path.join(__a ,__a ) )
_a : int = F"""pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
_a : int = {
'''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''],
'''model.norm.weight''': loaded['''norm.weight'''],
'''lm_head.weight''': loaded['''output.weight'''],
}
else:
_a : List[str] = {
'''model.norm.weight''': loaded[0]['''norm.weight'''],
'''model.embed_tokens.weight''': torch.cat(
[loaded[i]['''tok_embeddings.weight'''] for i in range(__a )] ,dim=1 ),
'''lm_head.weight''': torch.cat([loaded[i]['''output.weight'''] for i in range(__a )] ,dim=0 ),
}
for k, v in state_dict.items():
_a : Any = filename
param_count += v.numel()
torch.save(__a ,os.path.join(__a ,__a ) )
# Write configs
_a : Tuple = {'''total_size''': param_count * 2}
write_json(__a ,os.path.join(__a ,'''pytorch_model.bin.index.json''' ) )
_a : Any = params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1
_a : int = params['''multiple_of'''] if '''multiple_of''' in params else 256
_a : Union[str, Any] = LlamaConfig(
hidden_size=__a ,intermediate_size=compute_intermediate_size(__a ,__a ,__a ) ,num_attention_heads=params['''n_heads'''] ,num_hidden_layers=params['''n_layers'''] ,rms_norm_eps=params['''norm_eps'''] ,num_key_value_heads=__a ,)
config.save_pretrained(__a )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('''Loading the checkpoint in a Llama model.''' )
_a : Optional[Any] = LlamaForCausalLM.from_pretrained(__a ,torch_dtype=torch.floataa ,low_cpu_mem_usage=__a )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('''Saving in the Transformers format.''' )
model.save_pretrained(__a ,safe_serialization=__a )
shutil.rmtree(__a )
def __UpperCAmelCase ( tokenizer_path : Tuple ,input_tokenizer_path : List[Any] ) -> int:
"""simple docstring"""
tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(F"""Saving a {tokenizer_class.__name__} to {tokenizer_path}.""" )
tokenizer = tokenizer_class(input_tokenizer_path )
tokenizer.save_pretrained(tokenizer_path )
def __UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
_a : Dict = argparse.ArgumentParser()
parser.add_argument(
'''--input_dir''' ,help='''Location of LLaMA weights, which contains tokenizer.model and model folders''' ,)
parser.add_argument(
'''--model_size''' ,choices=['''7B''', '''7Bf''', '''13B''', '''13Bf''', '''30B''', '''65B''', '''70B''', '''70Bf''', '''tokenizer_only'''] ,)
parser.add_argument(
'''--output_dir''' ,help='''Location to write HF model and tokenizer''' ,)
parser.add_argument('''--safe_serialization''' ,type=__a ,help='''Whether or not to save using `safetensors`.''' )
_a : List[Any] = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir ,input_base_path=os.path.join(args.input_dir ,args.model_size ) ,model_size=args.model_size ,safe_serialization=args.safe_serialization ,)
_a : List[Any] = os.path.join(args.input_dir ,'''tokenizer.model''' )
write_tokenizer(args.output_dir ,__a )
if __name__ == "__main__":
main()
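# Hedged invocation example (the script filename and paths are assumptions):
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/llama/download --model_size 7B --output_dir ./llama-7b-hf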
| 578 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = KandinskyVaaControlnetPipeline
UpperCamelCase_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
UpperCamelCase_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
UpperCamelCase_ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
UpperCamelCase_ = False
@property
def __A ( self : Any ):
'''simple docstring'''
return 32
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return 32
@property
def __A ( self : Tuple ):
'''simple docstring'''
return self.time_input_dim
@property
def __A ( self : Dict ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def __A ( self : Tuple ):
'''simple docstring'''
return 100
@property
def __A ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : str = {
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE : int = UNetaDConditionModel(**UpperCamelCase__ )
return model
@property
def __A ( self : Any ):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __A ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.dummy_unet
SCREAMING_SNAKE_CASE : Dict = self.dummy_movq
SCREAMING_SNAKE_CASE : Union[str, Any] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Tuple = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __A ( self : int , device : Union[str, Any] , seed : Optional[Any]=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCamelCase__ )
# create hint
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
if str(UpperCamelCase__ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : List[Any] = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = '''cpu'''
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Optional[int] = self.pipeline_class(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Optional[int] = output.images
SCREAMING_SNAKE_CASE : List[Any] = pipe(
**self.get_dummy_inputs(UpperCamelCase__ ) , return_dict=UpperCamelCase__ , )[0]
SCREAMING_SNAKE_CASE : List[str] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Optional[int] = np.array(
[0.695_9826, 0.86_8279, 0.755_8092, 0.6876_9467, 0.8580_5804, 0.6597_7496, 0.4488_5302, 0.595_9111, 0.425_1595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase):
def __A ( self : Any ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy''' )
SCREAMING_SNAKE_CASE : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
hint = torch.from_numpy(np.array(UpperCamelCase__ ) ).float() / 255.0
hint = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
SCREAMING_SNAKE_CASE : int = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = KandinskyVaaControlnetPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : List[str] = pipeline.to(UpperCamelCase__ )
pipeline.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = '''A robot, 4k photo'''
SCREAMING_SNAKE_CASE : List[Any] = torch.Generator(device='''cuda''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = pipe_prior(
UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device='''cuda''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = pipeline(
image_embeds=UpperCamelCase__ , negative_image_embeds=UpperCamelCase__ , hint=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=100 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : List[str] = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
| 248 |
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = """\
@article{wang2019superglue,
  title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
  author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
  journal={arXiv preprint arXiv:1905.00537},
  year={2019}
}
"""
_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGlUE subset:
        - for 'record': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'prediction_text': the predicted answer text
        - for 'multirc': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question-answer pair as specified by the dataset
            - 'prediction': the predicted answer label
        - otherwise: list of predicted labels
    references: list of reference labels. Depending on the SuperGLUE subset:
        - for 'record': list of question-answers dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'answers': list of possible answers
        - otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
    - for 'record':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1': F1 score
    - for 'multirc':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1_m': Per-question macro-F1 score
        - 'f1_a': Average F1 score over all answers
    - for 'axb':
        'matthews_correlation': Matthew Correlation
    - for 'cb':
        - 'accuracy': Accuracy
        - 'f1': F1 score
    - for all others:
        - 'accuracy': Accuracy
Examples:

    >>> super_glue_metric = datasets.load_metric('super_glue', 'copa')  # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'record')
    >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
    >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
    >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())
def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds, labels):
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
| 248 | 1 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--repo_path',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()
    config_parameters_to_change = {
'image_size': 'sample_size',
'num_res_blocks': 'layers_per_block',
'block_channels': 'block_out_channels',
'down_blocks': 'down_block_types',
'up_blocks': 'up_block_types',
'downscale_freq_shift': 'freq_shift',
'resnet_num_groups': 'norm_num_groups',
'resnet_act_fn': 'act_fn',
'resnet_eps': 'norm_eps',
'num_head_channels': 'attention_head_dim',
}
    key_parameters_to_change = {
'time_steps': 'time_proj',
'mid': 'mid_block',
'downsample_blocks': 'down_blocks',
'upsample_blocks': 'up_blocks',
}
    subfolder = '' if has_file(args.repo_path, 'config.json') else 'unet'
    with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader:
        text = reader.read()
        config = json.loads(text)
    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)
    if has_file(args.repo_path, 'config.json'):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNet2DModel
        model = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
    config = dict(model.config)
    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]
        config['down_block_types'] = [k.replace('UNetRes', '') for k in config['down_block_types']]
        config['up_block_types'] = [k.replace('UNetRes', '') for k in config['up_block_types']]
    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin'))
        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split('.')[0] == key:
                    new_state_dict['.'.join([new_key] + param_key.split('.')[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value
        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
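    # Example invocation (illustrative; the repo path is hypothetical):
    #   python convert_script.py --repo_path ./my-unet-checkpoint --dump_path ./converted
    # With do_only_weights=True this rewrites legacy parameter prefixes such as
    # "downsample_blocks.*" to the current "down_blocks.*" naming before re-saving.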
| 6 |
'''simple docstring'''
from __future__ import annotations
class Matrix:
    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            'Matrices must be formed from a list of zero or more lists containing at '
            'least one and the same number of values, each of which must be of type '
            'int or float.')
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]
    @property
    def num_rows(self) -> int:
        return len(self.rows)
    @property
    def num_columns(self) -> int:
        return len(self.rows[0])
    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)
    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]
    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)
    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0]))
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns))
    def is_invertable(self) -> bool:
        return bool(self.determinant())
    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()
    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)
    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ])
    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ])
    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)
    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError('Only matrices with a non-zero determinant have an inverse')
        return self.adjugate() * (1 / determinant)
    def __repr__(self):
        return str(self.rows)
    def __str__(self):
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    '[' + '. '.join([str(value) for value in row]) + '.]'
                    for row in self.rows
                ])
            + "]"
        )
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError('Row must be a list containing all ints and/or floats')
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                'Row must be equal in length to the other rows in the matrix')
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            'Column must be a list containing all ints and/or floats')
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                'Column must be equal in length to the other columns in the matrix')
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]
    def __eq__(self, other: object):
        if not isinstance(other, Matrix):
return NotImplemented
return self.rows == other.rows
    def __ne__(self, other: object):
return not self == other
def __neg__( self : Union[str, Any]):
return self * -1
    def __add__(self, other: Matrix):
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order')
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
    def __sub__(self, other: Matrix):
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order')
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
    def __mul__(self, other: Matrix | int | float):
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other, Matrix):
if self.num_columns != other.num_rows:
raise ValueError(
'The number of columns in the first matrix must '
'be equal to the number of rows in the second')
return Matrix(
[
                    [Matrix.dot_product(row, column) for column in other.columns()]
for row in self.rows
])
else:
raise TypeError(
'A Matrix can only be multiplied by an int, float, or another matrix')
    def __pow__(self, other: int):
        if not isinstance(other, int):
raise TypeError('A Matrix can only be raised to the power of an int')
if not self.is_square:
raise ValueError('Only square matrices can be raised to a power')
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'Only invertable matrices can be raised to a negative power')
        result = self
for _ in range(other - 1):
result *= self
return result
@classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
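# Illustrative usage (not part of the original file): exercising the class above.
if __name__ == "__main__":
    example = Matrix([[1, 2], [3, 4]])
    print(example.determinant())                # -2
    print((example * example).rows)             # [[7, 10], [15, 22]]
    print((example + example.identity()).rows)  # [[2, 2], [3, 5]]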
| 6 | 1 |
import os
def a ( A__ = "matrix.txt" ) -> int:
'''simple docstring'''
with open(os.path.join(os.path.dirname(A__ ) , A__ ) ) as in_file:
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_file.read()
SCREAMING_SNAKE_CASE__ : Optional[Any] = [[int(A__ ) for cell in row.split(''',''' )] for row in data.strip().splitlines()]
SCREAMING_SNAKE_CASE__ : Dict = [[0 for cell in row] for row in grid]
SCREAMING_SNAKE_CASE__ : Any = len(grid[0] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [[0 for i in range(A__ )] for j in range(A__ )]
SCREAMING_SNAKE_CASE__ : Tuple = grid[0][0]
for i in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : List[str] = grid[0][i] + dp[0][i - 1]
for i in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : List[str] = grid[i][0] + dp[i - 1][0]
for i in range(1 , A__ ):
for j in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
print(F'''{solution() = }''')
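# Illustrative check (not part of the original solution): the same DP recurrence on a
# small in-memory grid, avoiding the matrix.txt file dependency.
def _min_path_sum(grid: list[list[int]]) -> int:
    n = len(grid)
    dp = [row[:] for row in grid]
    for i in range(1, n):
        dp[0][i] += dp[0][i - 1]
        dp[i][0] += dp[i - 1][0]
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] += min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]

assert _min_path_sum([[131, 673], [201, 96]]) == 131 + 201 + 96  # down, then right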
| 35 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'spm_file': 'sentencepiece.bpe.model',
}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
        ),
    },
    'spm_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
        )
    },
}
MAX_MODEL_INPUT_SIZES = {
    'facebook/s2t-small-librispeech-asr': 1_024,
}
MUSTC_LANGS = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
LANGUAGES = {'mustc': MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file, spm_file, bos_token='<s>', eos_token='</s>', pad_token='<pad>', unk_token='<unk>', do_upper_case=False, do_lower_case=False, tgt_lang=None, lang_codes=None, sp_model_kwargs=None, **kwargs, ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, do_upper_case=do_upper_case, do_lower_case=do_lower_case, tgt_lang=tgt_lang, lang_codes=lang_codes, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f'<lang:{lang}>' for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f'<lang:{lang}>') for lang in self.langs}
            self.additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)
    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang
    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)
    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])
    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix=None) -> Tuple[str, str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f'{save_directory} should be a directory'
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path: str):
    with open(path, "r") as f:
        return json.load(f)
def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
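# Minimal usage sketch (illustrative; assumes network access to the Hub and that the
# sentencepiece package is installed):
#
#   tok = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#   ids = tok("a transcript of the audio").input_ids
#   print(tok.decode(ids, skip_special_tokens=True))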
| 566 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
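# The `_LazyModule` registered above defers the heavy framework imports until an
# attribute is first accessed. A minimal self-contained sketch of the same idea
# (illustrative, not the real transformers implementation):
#
#   import importlib, types
#
#   class LazyModule(types.ModuleType):
#       def __init__(self, name, attr_to_module):
#           super().__init__(name)
#           self._attr_to_module = attr_to_module  # e.g. {"cos": "math"}
#       def __getattr__(self, attr):
#           module = importlib.import_module(self._attr_to_module[attr])
#           return getattr(module, attr)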
| 676 |
def lucas_lehmer_test(p: int) -> bool:
    """Return True if 2**p - 1 is prime (Lucas-Lehmer primality test)."""
    if p < 2:
        raise ValueError('p should not be less than 2!')
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
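# Illustrative extra checks (not in the original file): 2**p - 1 is a Mersenne prime
# for each exponent below, so the test should hold for all of them.
for exponent in (3, 5, 7, 13, 17, 19, 31):
    assert lucas_lehmer_test(exponent)
assert not lucas_lehmer_test(23)  # 2**23 - 1 = 8_388_607 = 47 * 178_481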
| 676 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def num_embed(self):
        return 12
    @property
    def num_embeds_ada_norm(self):
        return 12
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3, )
        return model
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModel(config)
    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            'attention_bias': True,
            'cross_attention_dim': 32,
            'attention_head_dim': height * width,
            'num_attention_heads': 1,
            'num_vector_embeds': self.num_embed,
            'num_embeds_ada_norm': self.num_embeds_ada_norm,
            'norm_num_groups': 32,
            'sample_size': width,
            'activation_fn': 'geglu-approximate',
        }
        model = Transformer2DModel(**model_kwargs)
        return model
    def test_vq_diffusion(self):
        device = 'cpu'
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)
        pipe = VQDiffusionPipeline(
            vqvae=vqvae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = 'teddy bear playing in the pool'
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type='np')
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type='np', return_dict=False, num_inference_steps=2)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling(self):
        device = 'cpu'
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length)
        pipe = VQDiffusionPipeline(
            vqvae=vqvae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = 'teddy bear playing in the pool'
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type='np')
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type='np', return_dict=False, num_inference_steps=2)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy')
        pipeline = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq')
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            'teddy bear playing in the pool', num_images_per_prompt=1, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
| 368 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def get_citation(base_url: str, params: dict) -> str:
    """Return the "Cited by ..." text for a Google Scholar lookup."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, 'html.parser')
    div = soup.find('div', attrs={'class': 'gs_ri'})
    anchors = div.find('div', attrs={'class': 'gs_fl'}).find_all('a')
    return anchors[2].get_text()
if __name__ == "__main__":
a__ : int = {
'''title''': (
'''Precisely geometry controlled microsupercapacitors for ultrahigh areal '''
'''capacitance, volumetric capacitance, and energy density'''
),
'''journal''': '''Chem. Mater.''',
'''volume''': 30,
'''pages''': '''3979-3990''',
'''year''': 2018,
'''hl''': '''en''',
}
print(get_citation('''https://scholar.google.com/scholar_lookup''', params=params))
| 368 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/convnextv2-tiny-1k-224': 'https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json',
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = 'convnextv2'
    def __init__(self, num_channels=3, patch_size=4, num_stages=4, hidden_sizes=None, depths=None, hidden_act='gelu', initializer_range=0.02, layer_norm_eps=1e-12, drop_path_rate=0.0, image_size=224, out_features=None, out_indices=None, **kwargs, ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
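# Illustrative usage (assumes a transformers version with ConvNeXT V2 support):
# `out_features` is validated and aligned against the stage names built above.
#
#   config = ConvNextV2Config(depths=[3, 3, 9, 3], out_features=["stage2", "stage4"])
#   print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']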
| 637 |
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)
    def match_in_pattern(self, char: str) -> int:
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1
    def mismatch_in_text(self, current_pos: int) -> int:
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1
    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = 'ABAABA'
pattern = 'AB'
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
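# Illustrative extra checks (not in the original file): the heuristic reports every
# alignment at which the window matches the pattern exactly.
assert BoyerMooreSearch('ABAABA', 'AB').bad_character_heuristic() == [0, 3]
assert BoyerMooreSearch('AAAA', 'B').bad_character_heuristic() == []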
| 637 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"
    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # frozen dataclass, so bypass the frozen setattr via __dict__
        task_template.__dict__["input_schema"] = input_schema
        return task_template
    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
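# Illustrative usage (hypothetical feature set): aligning the template with a dataset's
# features swaps in that dataset's concrete Audio feature.
#
#   feats = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
#   template = AutomaticSpeechRecognition().align_with_features(feats)
#   print(template.input_schema["audio"].sampling_rate)  # 16000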
| 401 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    'vocab_size': len(tokenizer),
    'scale_attn_by_inverse_layer_idx': True,
    'reorder_and_upcast_attn': True,
}
# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
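# Example invocation (illustrative; the exact flags are derived by HfArgumentParser from
# the InitializationArguments dataclass in the accompanying arguments.py, and the names
# below are inferred from the attributes used above):
#   python initialize_model.py --config_name gpt2-large --tokenizer_name codeparrot/codeparrot \
#       --model_name my-codeparrot-model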
| 401 | 1 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps)
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=1, help="The number of minibatches to be ran before gradients are accumulated.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
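# Example launch (illustrative; the script filename is hypothetical). Running through
# `accelerate launch` lets the Accelerator pick up the distributed configuration, and
# `--gradient_accumulation_steps 2` makes `accelerator.accumulate` sync gradients only
# every second minibatch:
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 2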
| 404 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np", )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
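# NOTE (illustrative, not part of the original test file): in img2img pipelines the
# `strength` argument scales how much of the diffusion schedule is actually run, so the
# effective number of denoising steps is roughly int(num_inference_steps * strength).
# A hypothetical helper for that bookkeeping, following the usual diffusers convention:
def _effective_img2img_steps(num_inference_steps: int, strength: float) -> int:
    # e.g. strength=0.75 with 10 requested steps denoises for roughly 7 steps
    return min(int(num_inference_steps * strength), num_inference_steps)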
| 404 | 1 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    """Pipeline for audio diffusion: denoises spectrogram images and decodes them back to audio."""

    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNetaDConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ):
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNetaDConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse the DDIM sampling loop to map images back to noise (only valid for DDIM)."""
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample

    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
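# NOTE (illustrative, not part of the original pipeline file): slerp above is typically
# used to interpolate between two starting noises so the decoded audio morphs smoothly.
# A minimal sketch, assuming two pre-sampled noise tensors of the same shape:
#
#   noise_a = torch.randn(1, 1, 256, 256)
#   noise_b = torch.randn(1, 1, 256, 256)
#   blended = AudioDiffusionPipeline.slerp(noise_a, noise_b, 0.5)
#   assert blended.shape == noise_a.shape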
| 192 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, lr_str = rule_str.split(":")
        steps = int(value_str)
        lr_multiple = float(lr_str)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
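# NOTE (illustrative, not part of the original module): a minimal sketch of how the
# factory above is typically used; the model and step counts here are placeholders.
if __name__ == "__main__":
    import torch

    model = torch.nn.Linear(8, 8)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
    lr_scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=100, num_training_steps=1000)
    for _ in range(3):
        optimizer.step()      # inside the training loop, after the backward pass
        lr_scheduler.step()   # advance the warmup/decay schedule by one step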
| 279 | 0 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 709 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
def html_progress_bar(value, total, prefix, label, width=300):
    "HTML code for a progress bar `value`/`total` with `prefix` and `label` on the right."
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """
def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n    <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
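# NOTE (illustrative, not part of the original module): the table helper expects the
# first row to be the header and later rows to be values, e.g.
#   text_to_html_table([["Step", "Training Loss"], [10, 0.123456], [20, 0.098765]])
# renders a two-column HTML table with floats formatted to six decimals.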
class NotebookProgressBar:
    """A progress bar displayed in a Jupyter notebook."""

    warmup = 5
    update_every = 0.2

    def __init__(self, total, prefix=None, leave=True, parent=None, width=300):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
    def update(self, value, force_update=False, comment=None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)
    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()
    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        "Closes the progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))
class NotebookTrainingTracker(NotebookProgressBar):
    """A tracker that displays a progress bar plus a metrics table below it."""

    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()
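# NOTE (illustrative, not part of the original module): write_line consumes one dict per
# logging event; a hypothetical tracker fed two steps would build an inner_table like
#   [["Step", "Training Loss"], [10, 0.98], [20, 0.75]]
# which display() renders through text_to_html_table above.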
class NotebookProgressCallback(TrainerCallback):
    """A TrainerCallback that displays progress and metrics in a Jupyter notebook."""

    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False
    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None
    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)
    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break
            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True
    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
| 436 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyVaaPriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaPriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
| 433 |
from __future__ import annotations

END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
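# NOTE (illustrative, not part of the original script): with the word list above,
# autocomplete_using_trie("de") is expected to yield the "de"-prefixed entries in
# insertion order, e.g. ('depart ', 'detergent ', 'deer ', 'deal ') -- each completion
# carries the trailing space that _elements uses to mark the end of a word.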
| 20 | 0 |
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Create all character n-grams of size `ngram_size` from `sentence`."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
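# NOTE (illustrative, not part of the original script): the function slides a fixed-size
# window over the string, one character at a time:
#   create_ngram("abcde", 2) -> ["ab", "bc", "cd", "de"]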
| 695 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class PixaStructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50_244,
        hidden_size=768,
        d_kv=64,
        d_ff=2_048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class PixaStructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2_048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4_096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class PixaStructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")
        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: PixaStructTextConfig, vision_config: PixaStructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
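# NOTE (illustrative, not part of the original module): composing the composite config
# from its two sub-configs via the classmethod above (the layer counts are arbitrary):
#
#   text_config = PixaStructTextConfig(num_layers=2, num_heads=2)
#   vision_config = PixaStructVisionConfig(num_hidden_layers=2, num_attention_heads=2)
#   config = PixaStructConfig.from_text_vision_configs(text_config, vision_config)
#   assert config.text_config.num_layers == 2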
| 695 | 1 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset
    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")
def __A ( self : Any ) -> int:
import faiss
SCREAMING_SNAKE_CASE_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def __A ( self : List[Any] ) -> Union[str, Any]:
import faiss
SCREAMING_SNAKE_CASE_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__magic_name__ ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def __A ( self : Optional[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(__magic_name__ , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def __A ( self : str ) -> str:
from elasticsearch import Elasticsearch
SCREAMING_SNAKE_CASE_ = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
SCREAMING_SNAKE_CASE_ = {"acknowledged": True}
mocked_bulk.return_value([(True, None)] * 30 )
SCREAMING_SNAKE_CASE_ = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
SCREAMING_SNAKE_CASE_ = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=__magic_name__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __A ( self : int ) -> Tuple:
import faiss
SCREAMING_SNAKE_CASE_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
SCREAMING_SNAKE_CASE_ = np.zeros(5 , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = index.search(__magic_name__ )
self.assertRaises(__magic_name__ , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
SCREAMING_SNAKE_CASE_ = np.eye(5 , dtype=np.floataa )[::-1]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = index.search_batch(__magic_name__ )
self.assertRaises(__magic_name__ , index.search_batch , queries[0] )
SCREAMING_SNAKE_CASE_ = [scores[0] for scores in total_scores]
SCREAMING_SNAKE_CASE_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__magic_name__ ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , __magic_name__ )
def __A ( self : Optional[Any] ) -> Optional[Any]:
import faiss
SCREAMING_SNAKE_CASE_ = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
SCREAMING_SNAKE_CASE_ = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def __A ( self : Optional[int] ) -> Optional[Any]:
import faiss
SCREAMING_SNAKE_CASE_ = faiss.IndexFlat(5 )
SCREAMING_SNAKE_CASE_ = FaissIndex(custom_index=__magic_name__ )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def __A ( self : Union[str, Any] ) -> int:
import faiss
SCREAMING_SNAKE_CASE_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__magic_name__ ) as tmp_file:
index.save(tmp_file.name )
SCREAMING_SNAKE_CASE_ = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
SCREAMING_SNAKE_CASE_ = np.zeros(5 , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = index.search(__magic_name__ )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
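# NOTE (illustrative, not part of the original test file): the minimal FaissIndex round
# trip the tests above exercise, outside unittest -- float32 vectors, inner-product metric:
#
#   import faiss, numpy as np
#   from datasets.search import FaissIndex
#   index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
#   index.add_vectors(np.eye(5, dtype=np.float32))
#   scores, indices = index.search(np.eye(5, dtype=np.float32)[1])
#   assert indices[0] == 1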
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __A ( self : int ) -> int:
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
SCREAMING_SNAKE_CASE_ = Elasticsearch()
SCREAMING_SNAKE_CASE_ = {"acknowledged": True}
SCREAMING_SNAKE_CASE_ = ElasticSearchIndex(es_client=__magic_name__ )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(["foo", "bar", "foobar"] )
# single query
SCREAMING_SNAKE_CASE_ = "foo"
SCREAMING_SNAKE_CASE_ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = index.search(__magic_name__ )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
SCREAMING_SNAKE_CASE_ = "foo"
SCREAMING_SNAKE_CASE_ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = index.search(__magic_name__ , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
SCREAMING_SNAKE_CASE_ = ["foo", "bar", "foobar"]
SCREAMING_SNAKE_CASE_ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = index.search_batch(__magic_name__ )
SCREAMING_SNAKE_CASE_ = [scores[0] for scores in total_scores]
SCREAMING_SNAKE_CASE_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__magic_name__ ) , 0 )
self.assertListEqual([1, 1, 1] , __magic_name__ )
# batched queries with timeout
SCREAMING_SNAKE_CASE_ = ["foo", "bar", "foobar"]
SCREAMING_SNAKE_CASE_ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = index.search_batch(__magic_name__ , request_timeout=30 )
SCREAMING_SNAKE_CASE_ = [scores[0] for scores in total_scores]
SCREAMING_SNAKE_CASE_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__magic_name__ ) , 0 )
self.assertListEqual([1, 1, 1] , __magic_name__ )
| 140 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
"""simple docstring"""
def __init__( self : List[Any] , __magic_name__ : List[Any] , __magic_name__ : List[str]=14 , __magic_name__ : Dict=7 , __magic_name__ : Optional[Any]=True , __magic_name__ : List[str]=True , __magic_name__ : List[Any]=False , __magic_name__ : Union[str, Any]=True , __magic_name__ : List[str]=99 , __magic_name__ : Any=32 , __magic_name__ : Optional[Any]=4 , __magic_name__ : str=4 , __magic_name__ : Optional[int]=4 , __magic_name__ : int=37 , __magic_name__ : int="gelu" , __magic_name__ : Optional[int]=0.1 , __magic_name__ : Optional[Any]=0.1 , __magic_name__ : Optional[int]=512 , __magic_name__ : int=0.02 , ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = seq_length
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_input_mask
SCREAMING_SNAKE_CASE_ = use_token_type_ids
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = rotary_dim
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = vocab_size - 1
SCREAMING_SNAKE_CASE_ = vocab_size - 1
SCREAMING_SNAKE_CASE_ = vocab_size - 1
def __A ( self : Dict ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=__magic_name__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def __A ( self : Dict ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs
SCREAMING_SNAKE_CASE_ = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , input_ids , attention_mask ):
        max_decoder_length = 20
        model = model_class_name(config )
        past_key_values = model.init_cache(input_ids.shape[0] , max_decoder_length )
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="i4" )
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
        outputs_cache = model(
            input_ids[:, :-1] , attention_mask=attention_mask , past_key_values=past_key_values , position_ids=position_ids , )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model(
            input_ids[:, -1:] , attention_mask=attention_mask , past_key_values=outputs_cache.past_key_values , position_ids=position_ids , )
        outputs = model(input_ids )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''' )
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , input_ids , attention_mask ):
        max_decoder_length = 20
        model = model_class_name(config )
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
        past_key_values = model.init_cache(input_ids.shape[0] , max_decoder_length )
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
        outputs_cache = model(
            input_ids[:, :-1] , attention_mask=attention_mask_cache , past_key_values=past_key_values , position_ids=position_ids , )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model(
            input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=attention_mask_cache , position_ids=position_ids , )
        outputs = model(input_ids , attention_mask=attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''' )
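# The two cache checks above implement the standard incremental-decoding test:
# run the model once over the full sequence, then replay it through
# `init_cache`/`past_key_values` (prefill all but the last token, then decode
# one step), and assert that the final-token logits agree to ~1e-3.
# A minimal sketch of the pattern (hypothetical shapes, outside the tester):
#
#   past = model.init_cache(batch_size, max_decoder_length)   # preallocated KV cache
#   out = model(ids[:, :-1], attention_mask=mask, past_key_values=past, position_ids=pos)
#   nxt = model(ids[:, -1:], attention_mask=mask, past_key_values=out.past_key_values, position_ids=last_pos)
#   # nxt.logits[:, -1] should match model(ids).logits[:, -1] up to numerical noise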
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin , FlaxGenerationTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp( self ):
        self.model_tester = FlaxGPTJModelTester(self )
    def test_use_cache_forward( self ):
        for model_class_name in self.all_model_classes:
            config , input_ids , attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name , config , input_ids , attention_mask )
    def test_use_cache_forward_with_attn_mask( self ):
        for model_class_name in self.all_model_classes:
            config , input_ids , attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name , config , input_ids , attention_mask )
@tooslow
    def test_batch_generation( self ):
        tokenizer = GPTaTokenizer.from_pretrained("gpt2" , pad_token="<|endoftext|>" , padding_side="left" )
        inputs = tokenizer(["Hello this is a long string", "Hey"] , return_tensors="np" , padding=True , truncation=True )
        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B" )
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id
        jit_generate = jax.jit(model.generate )
        output_sequences = jit_generate(
            inputs["input_ids"] , attention_mask=inputs["attention_mask"] , pad_token_id=tokenizer.pad_token_id ).sequences
        output_string = tokenizer.batch_decode(output_sequences , skip_special_tokens=True )
        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]
        self.assertListEqual(output_string , expected_string )
@is_pt_flax_cross_test
    def test_equivalence_pt_to_flax( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                pt_inputs = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers , pt_model_class_name )
                batch_size , seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
                for batch_idx, start_index in enumerate(rnd_start_indices ):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config ).eval()
                fx_model = model_class(config , dtype=jnp.float32 )
                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , fx_model )
                fx_model.params = fx_state
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs ).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(fx_outputs ) , len(pt_outputs ) , "Output lengths differ between Flax and PyTorch" )
                for fx_output, pt_output in zip(fx_outputs , pt_outputs ):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname )
                    fx_model_loaded = model_class.from_pretrained(tmpdirname , from_pt=True )
                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded ) , len(pt_outputs ) , "Output lengths differ between Flax and PyTorch" )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded , pt_outputs ):
                    self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
    def test_equivalence_flax_to_pt( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                pt_inputs = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers , pt_model_class_name )
                pt_model = pt_model_class(config ).eval()
                fx_model = model_class(config , dtype=jnp.float32 )
                pt_model = load_flax_weights_in_pytorch_model(pt_model , fx_model.params )
                batch_size , seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
                for batch_idx, start_index in enumerate(rnd_start_indices ):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                # make sure weights are tied in PyTorch
                pt_model.tie_weights()
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs ).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(fx_outputs ) , len(pt_outputs ) , "Output lengths differ between Flax and PyTorch" )
                for fx_output, pt_output in zip(fx_outputs , pt_outputs ):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname )
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname , from_flax=True )
                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs ).to_tuple()
                self.assertEqual(
                    len(fx_outputs ) , len(pt_outputs_loaded ) , "Output lengths differ between Flax and PyTorch" )
                for fx_output, pt_output in zip(fx_outputs , pt_outputs_loaded ):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B" )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
| 140 | 1 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments ):
    '''simple docstring'''
    deprecated_args = [
        '''no_inference''',
        '''no_cuda''',
        '''no_tpu''',
        '''no_speed''',
        '''no_memory''',
        '''no_env_print''',
        '''no_multi_process''',
    ]
    def __init__( self , **kwargs ):
        # Legacy support: translate deprecated negative args (e.g. `no_cuda`)
        # into their positive counterparts (`cuda=False`).
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                positive_value = not kwargs.pop(deprecated_arg )
                setattr(self , positive_arg , positive_value )
                logger.warning(
                    F'''{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'''
                    F''' {positive_arg}={positive_value}''' )
        self.torchscript = kwargs.pop('''torchscript''' , self.torchscript )
        self.torch_xla_tpu_print_metrics = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
        self.fp16_opt_level = kwargs.pop('''fp16_opt_level''' , self.fp16_opt_level )
        super().__init__(**kwargs )
    torchscript : bool = field(default=False , metadata={'''help''': '''Trace the models using torchscript'''} )
    torch_xla_tpu_print_metrics : bool = field(default=False , metadata={'''help''': '''Print Xla/PyTorch tpu metrics'''} )
    fp16_opt_level : str = field(
        default='''O1''' , metadata={
            '''help''': (
                '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '''
                '''See details at https://nvidia.github.io/apex/amp.html'''
            )
        } , )
    @cached_property
    def _setup_devices( self ) -> Tuple["torch.device", int]:
        requires_backends(self , ['''torch'''] )
        logger.info('''PyTorch: setting up devices''' )
        if not self.cuda:
            device = torch.device('''cpu''' )
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
            n_gpu = torch.cuda.device_count()
        return device, n_gpu
    @property
    def is_tpu( self ) -> bool:
        return is_torch_tpu_available() and self.tpu
    @property
    def device_idx( self ) -> int:
        requires_backends(self , ['''torch'''] )
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()
    @property
    def device( self ) -> "torch.device":
        requires_backends(self , ['''torch'''] )
        return self._setup_devices[0]
    @property
    def n_gpu( self ) -> int:
        requires_backends(self , ['''torch'''] )
        return self._setup_devices[1]
    @property
    def is_gpu( self ) -> bool:
        return self.n_gpu > 0
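# A minimal usage sketch (the model name is illustrative; `no_cuda` is one of
# the deprecated negative flags that __init__ above rewrites to `cuda=False`):
#
#   args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], no_cuda=True)
#   print(args.device, args.n_gpu)  # -> device(type='cpu') 0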
| 501 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_swin'''] = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_swin'''] = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 501 | 1 |
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# As the first dimension has 2 options and the second has 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
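# A minimal sketch (not part of the tool itself) of how this expansion works,
# mirroring the itertools.product call in main() below:
#
#   dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
#   variations = [" ".join(v).strip() for v in itertools.product(*dims)]
#   # -> ["--tf32 0", "--tf32 0 --fp16", ..., "--tf32 1 --bf16"]  (6 entries)
#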
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each experiment multiple times, e.g., 3 times using --repeat-times 3, and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("""nan""")
class Tee:
    """
    A helper class to tee print's output into a file. Usage: sys.stdout = Tee(filename)
    """
    def __init__( self, filename ):
        self.stdout = sys.stdout
        self.file = open(filename, '''a''' )
    def __getattr__( self, attr ):
        return getattr(self.stdout, attr )
    def write( self, msg ):
        self.stdout.write(msg )
        # strip tqdm codes
        self.file.write(re.sub(R'''^.*\r''', '''''', msg, 0, re.M ) )
def get_original_command( max_width : int = 80 , full_python_path : bool = False ) -> str:
    """Reconstruct the command line this script was launched with."""
    cmd = []
    # deal with critical env vars
    env_keys = ['''CUDA_VISIBLE_DEVICES''']
    for key in env_keys:
        val = os.environ.get(key , None )
        if val is not None:
            cmd.append(F'''{key}={val}''' )
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split('''/''' )[-1]
    cmd.append(python )
    # now the normal args
    cmd += list(map(shlex.quote , sys.argv ) )
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ''''''
    while len(cmd ) > 0:
        current_line += F'''{cmd.pop(0 )} '''
        if len(cmd ) == 0 or len(current_line ) + len(cmd[0] ) + 1 > max_width - 1:
            lines.append(current_line )
            current_line = ''''''
    return "\\\n".join(lines )
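# e.g. with CUDA_VISIBLE_DEVICES=0 exported, get_original_command() reproduces
# something like (illustrative; exact line wrapping depends on max_width):
#
#   CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
#   --base-cmd ... --variations ... --target-metric-key train_samples_per_second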
def get_base_command( args , output_dir ):
    """Normalize args.base_cmd and return it as a list ready for subprocess."""
    # unwrap multi-line input
    args.base_cmd = re.sub(r'''[\\\n]+''' , ''' ''' , args.base_cmd )
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r'''--output_dir\s+[^\s]+''' , '''''' , args.base_cmd )
    args.base_cmd += F''' --output_dir {output_dir}'''
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r'''--overwrite_output_dir\s+''' , '''''' , args.base_cmd )
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd )
def process_run_single( id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose ):
    """Run one benchmark sub-process and return the requested metrics."""
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep
        sleep(0 )
        return dict(
            {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.222222222] )} , )
    result = subprocess.run(cmd , capture_output=True , text=True )
    if verbose:
        print('''STDOUT''' , result.stdout )
        print('''STDERR''' , result.stderr )
    # save the streams
    prefix = variation.replace(''' ''' , '''-''' )
    with open(Path(output_dir ) / F'''log.{prefix}.stdout.txt''' , '''w''' ) as f:
        f.write(result.stdout )
    with open(Path(output_dir ) / F'''log.{prefix}.stderr.txt''' , '''w''' ) as f:
        f.write(result.stderr )
    if result.returncode != 0:
        if verbose:
            print('''failed''' )
        return {target_metric_key: nan}
    with io.open(F'''{output_dir}/all_results.json''' , '''r''' , encoding='''utf-8''' ) as f:
        metrics = json.load(f )
    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run( id , cmd , variation , longest_variation_len , target_metric_key , report_metric_keys , repeat_times , output_dir , verbose , ):
    """Run one variation repeat_times times and return the averaged metrics."""
    variation_key = '''variation''' # key under which the variation label is stored in the results
    results = []
    metrics = []
    preamble = F'''{id}: {variation:<{longest_variation_len}}'''
    outcome = F'''{preamble}: '''
    metric_keys = set(report_metric_keys + [target_metric_key] )
    for i in tqdm(range(repeat_times ) , desc=preamble , leave=False ):
        single_run_metrics = process_run_single(
            id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result ):
            metrics.append(single_run_metrics )
            results.append(result )
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = F'''\33[2K\r{outcome}'''
    if len(metrics ) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key] , 2 )
        results_str = F'''{outcome} {mean_target}'''
        if len(results ) > 1:
            results_str += F''' {tuple(round(x , 2 ) for x in results )}'''
        print(results_str )
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome )
        return {variation_key: variation, target_metric_key: nan}
def get_versions() -> str:
    """Report the software/hardware environment the benchmark ran on."""
    properties = torch.cuda.get_device_properties(torch.device('''cuda''' ) )
return F'''
Datetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def process_results( results , target_metric_key , report_metric_keys , base_variation , output_dir ):
    """Assemble the per-variation results into github- and console-friendly reports."""
    df = pd.DataFrame(results )
    variation_key = '''variation'''
    diff_key = '''diff_%'''
    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation] ):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value ):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()
    # create diff column if possible
    if not math.isnan(sentinel_value ):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
            if not math.isnan(r[target_metric_key] )
            else 0 , axis='''columns''' , )
    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols , axis='''columns''' ) # reorder cols
    # capitalize
    df = df.rename(str.capitalize , axis='''columns''' )
    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace('''_''' , '''<br>''' ) , axis='''columns''' )
    df_console = df.rename(lambda c: c.replace('''_''' , '''\n''' ) , axis='''columns''' )
    report = ['''''', '''Copy between the cut-here-lines and paste as is to github or a forum''']
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False , floatfmt='''.2f''' )]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False , floatfmt='''.2f''' )]
    print('''\n\n'''.join(report ) )
def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--base-cmd''' , default=None , type=str , required=True , help='''Base cmd''' , )
    parser.add_argument(
        '''--variations''' , default=None , type=str , nargs='''+''' , required=True , help='''Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'''' , )
    parser.add_argument(
        '''--base-variation''' , default=None , type=str , help='''Baseline variation to compare to. if None the minimal target value will be used to compare against''' , )
    parser.add_argument(
        '''--target-metric-key''' , default=None , type=str , required=True , help='''Target metric key in output_dir/all_results.json, e.g., train_samples_per_second''' , )
    parser.add_argument(
        '''--report-metric-keys''' , default='''''' , type=str , help='''Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples\'''' , )
    parser.add_argument(
        '''--repeat-times''' , default=1 , type=int , help='''How many times to re-run each variation - an average will be reported''' , )
    parser.add_argument(
        '''--output_dir''' , default='''output_benchmark''' , type=str , help='''The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked''' , )
    parser.add_argument(
        '''--verbose''' , default=False , action='''store_true''' , help='''Whether to show the outputs of each run or just the benchmark progress''' , )
    args = parser.parse_args()
    output_dir = args.output_dir
    Path(output_dir ).mkdir(exist_ok=True )
    base_cmd = get_base_command(args , output_dir )
    # split each dimension into its --foo variations
    dims = [list(map(str.strip , re.split(r'''\|''' , x ) ) ) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip , map(''' '''.join , itertools.product(*dims ) ) ) )
    longest_variation_len = max(len(x ) for x in variations )
    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()
    # capture prints into a log file for convenience
    report_fn = F'''benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt'''
    print(F'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' )
    print(F'''and this script\'s output is also piped into {report_fn}''' )
    sys.stdout = Tee(report_fn )
    print(F'''\n*** Running {len(variations )} benchmarks:''' )
    print(F'''Base command: {' '.join(base_cmd )}''' )
    results = []
    for id, variation in enumerate(tqdm(variations , desc='''Total completion: ''' , leave=False ) ):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1 , cmd , variation , longest_variation_len , args.target_metric_key , report_metric_keys , args.repeat_times , output_dir , args.verbose , ) )
    process_results(results , args.target_metric_key , report_metric_keys , args.base_variation , output_dir )
if __name__ == "__main__":
main()
| 676 |
'''simple docstring'''
from __future__ import annotations
def prime_sieve( limit : int ) -> list[int]:
    """Sieve of Eratosthenes over odd numbers; returns all primes below `limit`."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , limit , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
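# e.g. prime_sieve(10) -> [2, 3, 5, 7]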
def solution( ceiling : int = 100_0000 ) -> int:
    """Find the largest prime below `ceiling` that is the sum of the most consecutive primes."""
    primes = prime_sieve(ceiling )
    length = 0
    largest = 0
    for i in range(len(primes ) ):
        for j in range(i + length , len(primes ) ):
            sol = sum(primes[i:j] )
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
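# With the default ceiling of one million this finds 997651, the published
# answer to Project Euler problem 50.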
if __name__ == "__main__":
print(F"""{solution() = }""")
| 676 | 1 |
"""simple docstring"""
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def pytest_collection_modifyitems( config , items ):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
            continue
        item.add_marker(pytest.mark.unit )
def pytest_configure( config ):
    config.addinivalue_line('''markers''', '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=True )
def set_test_cache_config( tmp_path_factory , monkeypatch ):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / '''cache'''
    test_hf_datasets_cache = test_hf_cache_home / '''datasets'''
    test_hf_metrics_cache = test_hf_cache_home / '''metrics'''
    test_hf_modules_cache = test_hf_cache_home / '''modules'''
    monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''', str(test_hf_datasets_cache ) )
    monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''', str(test_hf_metrics_cache ) )
    monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''', str(test_hf_modules_cache ) )
    test_downloaded_datasets_path = test_hf_datasets_cache / '''downloads'''
    monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''', str(test_downloaded_datasets_path ) )
    test_extracted_datasets_path = test_hf_datasets_cache / '''downloads''' / '''extracted'''
    monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''', str(test_extracted_datasets_path ) )
@pytest.fixture(autouse=True , scope='''session''' )
def disable_tqdm_output():
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True )
def set_update_download_counts_to_false( monkeypatch ):
    # don't take tests into account when counting downloads
    monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''', False )
@pytest.fixture
def set_sqlalchemy_silence_uber_warning( monkeypatch ):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''', True )
| 706 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin ):
    '''simple docstring'''
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ChineseCLIPImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
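    # Minimal usage sketch (the checkpoint name is illustrative; any repo that
    # bundles a BERT tokenizer with a ChineseCLIP image processor works):
    #
    #   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
    #   batch = processor(text=["一张猫的照片"], images=image, return_tensors="pt")
    #   # batch carries input_ids/attention_mask from the tokenizer plus pixel_values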
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
| 285 | 0 |
import math
def solution( n : int = 100 ) -> int:
    '''Return the difference between the square of the sum and the sum of the squares of the first n natural numbers.'''
    sum_of_squares = sum(i * i for i in range(1 , n + 1 ) )
    square_of_sum = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
    return square_of_sum - sum_of_squares
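# Worked example: for n = 10 the sum of squares is 385, the square of the sum
# is 55**2 = 3025, and the difference is 2640.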
if __name__ == "__main__":
    print(F'''{solution() = }''')
| 106 |
# Algorithm for the pigeonhole sorting
def pigeonhole_sort( a ) -> None:
    '''Sort the list `a` of integers in place using pigeonhole sort.'''
    min_val = min(a ) # min() finds the minimum value
    max_val = max(a ) # max() finds the maximum value
    size = max_val - min_val + 1 # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x , int ), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size ):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
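# e.g. a = [8, 3, 2, 7, 4, 6, 8]; pigeonhole_sort(a) leaves a == [2, 3, 4, 6, 7, 8, 8].
# Runs in O(n + k) time and O(k) extra space, where k = max(a) - min(a) + 1.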
def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a )
    print('Sorted order is:' , ' '.join(str(x ) for x in a ) )
if __name__ == "__main__":
main()
| 230 | 0 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
UpperCamelCase : Optional[int] = 'docs/source/en/_toctree.yml'
def A__ ( __lowerCAmelCase : Union[str, Any] ):
lowerCamelCase__ = defaultdict(__lowerCAmelCase )
for doc in model_doc:
counts[doc["local"]] += 1
lowerCamelCase__ = [key for key, value in counts.items() if value > 1]
lowerCamelCase__ = []
for duplicate_key in duplicates:
lowerCamelCase__ = list({doc["""title"""] for doc in model_doc if doc["""local"""] == duplicate_key} )
if len(__lowerCAmelCase ) > 1:
raise ValueError(
F'''{duplicate_key} is present several times in the documentation table of content at '''
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc["""local"""]] == 1] )
# Sort
return sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase : s["title"].lower() )
def A__ ( __lowerCAmelCase : Dict=False ):
with open(__lowerCAmelCase , encoding="""utf-8""" ) as f:
lowerCamelCase__ = yaml.safe_load(f.read() )
# Get to the API doc
lowerCamelCase__ = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowerCamelCase__ = content[api_idx]["""sections"""]
# Then to the model doc
lowerCamelCase__ = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
lowerCamelCase__ = api_doc[model_idx]["""sections"""]
lowerCamelCase__ = [(idx, section) for idx, section in enumerate(__lowerCAmelCase ) if """sections""" in section]
lowerCamelCase__ = False
for idx, modality_doc in modalities_docs:
lowerCamelCase__ = modality_doc["""sections"""]
lowerCamelCase__ = clean_model_doc_toc(__lowerCAmelCase )
if old_modality_doc != new_modality_doc:
lowerCamelCase__ = True
if overwrite:
lowerCamelCase__ = new_modality_doc
if diff:
if overwrite:
lowerCamelCase__ = model_doc
lowerCamelCase__ = api_doc
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(__lowerCAmelCase , allow_unicode=__lowerCAmelCase ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
UpperCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
UpperCamelCase : str = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 9 |
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]
def next_number( number : int ) -> int:
    '''Return the sum of the squares of the digits of `number`.'''
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
        number //= 10_0000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_00_00_00
CHAINS[0] = True # the chain starting at 1 ends in 1
CHAINS[57] = False # the chain starting at 58 ends in 89
def chain( number : int ) -> bool:
    '''Return True if the digit-square chain of `number` ends in 1, False if it ends in 89.'''
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1] # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 1000_0000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution( number : int = 1000_0000 ) -> int:
    '''Count how many starting numbers below `number` arrive at 89.'''
    for i in range(1 , number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(False )
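# The published Project Euler 92 answer for the default limit of ten million is 8581146.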
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution() = }')
| 9 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
@property
def snake_case_ ( self ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase = UNetaDModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
@property
def snake_case_ ( self ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase = VQModel(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
return model
@property
def snake_case_ ( self ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(a_ )
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = self.dummy_uncond_unet
UpperCAmelCase = DDIMScheduler()
UpperCAmelCase = self.dummy_vq_model
UpperCAmelCase = LDMPipeline(unet=a_ , vqvae=a_ , scheduler=a_ )
ldm.to(a_ )
ldm.set_progress_bar_config(disable=a_ )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = ldm(generator=a_ , num_inference_steps=2 , output_type='numpy' ).images
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = ldm(generator=a_ , num_inference_steps=2 , output_type='numpy' , return_dict=a_ )[0]
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCAmelCase = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
UpperCAmelCase = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case_ ( self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
ldm.to(a_ )
ldm.set_progress_bar_config(disable=a_ )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = ldm(generator=a_ , num_inference_steps=5 , output_type='numpy' ).images
UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
UpperCAmelCase = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
UpperCAmelCase = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 447 |
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
_a : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(a )
class lowercase_ ( a ):
'''simple docstring'''
def __init__( self , *a_ , **a_ ) -> str:
"""simple docstring"""
super().__init__(*a_ , **a_ )
requires_backends(self , 'decord' )
self.check_model_type(a_ )
def snake_case_ ( self , a_=None , a_=None , a_=None ) -> int:
"""simple docstring"""
UpperCAmelCase = {}
if frame_sampling_rate is not None:
UpperCAmelCase = frame_sampling_rate
if num_frames is not None:
UpperCAmelCase = num_frames
UpperCAmelCase = {}
if top_k is not None:
UpperCAmelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , a_ , **a_ ) -> Union[str, Any]:
"""simple docstring"""
return super().__call__(a_ , **a_ )
def snake_case_ ( self , a_ , a_=None , a_=1 ) -> Tuple:
"""simple docstring"""
if num_frames is None:
UpperCAmelCase = self.model.config.num_frames
if video.startswith('http://' ) or video.startswith('https://' ):
UpperCAmelCase = BytesIO(requests.get(a_ ).content )
UpperCAmelCase = VideoReader(a_ )
videoreader.seek(0 )
UpperCAmelCase = 0
UpperCAmelCase = num_frames * frame_sampling_rate - 1
UpperCAmelCase = np.linspace(a_ , a_ , num=a_ , dtype=np.intaa )
UpperCAmelCase = videoreader.get_batch(a_ ).asnumpy()
UpperCAmelCase = list(a_ )
UpperCAmelCase = self.image_processor(a_ , return_tensors=self.framework )
return model_inputs
def snake_case_ ( self , a_ ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = self.model(**a_ )
return model_outputs
def snake_case_ ( self , a_ , a_=5 ) -> Union[str, Any]:
"""simple docstring"""
if top_k > self.model.config.num_labels:
UpperCAmelCase = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase = model_outputs.logits.softmax(-1 )[0]
UpperCAmelCase , UpperCAmelCase = probs.topk(a_ )
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
UpperCAmelCase = scores.tolist()
UpperCAmelCase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(a_ , a_ )]
| 447 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any]=7 , UpperCAmelCase_ : List[Any]=3 , UpperCAmelCase_ : List[Any]=18 , UpperCAmelCase_ : Optional[int]=30 , UpperCAmelCase_ : Union[str, Any]=400 , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Any=[0.5, 0.5, 0.5] , UpperCAmelCase_ : List[str]=[0.5, 0.5, 0.5] , ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Dict = parent
_UpperCAmelCase : Any = batch_size
_UpperCAmelCase : Optional[int] = num_channels
_UpperCAmelCase : List[str] = image_size
_UpperCAmelCase : int = min_resolution
_UpperCAmelCase : int = max_resolution
_UpperCAmelCase : Optional[int] = do_resize
_UpperCAmelCase : str = size if size is not None else {'''height''': 18, '''width''': 20}
_UpperCAmelCase : Any = do_thumbnail
_UpperCAmelCase : List[str] = do_align_axis
_UpperCAmelCase : List[Any] = do_pad
_UpperCAmelCase : Optional[int] = do_normalize
_UpperCAmelCase : str = image_mean
_UpperCAmelCase : List[str] = image_std
def a_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowerCAmelCase_ ( lowercase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Any = DonutImageProcessor if is_vision_available() else None
def a_ ( self : Dict ) -> int:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = DonutImageProcessingTester(self )
@property
def a_ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def a_ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , '''size''' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , '''do_thumbnail''' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , '''do_pad''' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , '''image_std''' ) )
def a_ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} )
_UpperCAmelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
_UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} )
def a_ ( self : List[str] ) -> str:
'''simple docstring'''
pass
@is_flaky()
def a_ ( self : int ) -> str:
'''simple docstring'''
_UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input
_UpperCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
_UpperCAmelCase : str = image_processing(UpperCAmelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def a_ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
# Test not batched input
_UpperCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
_UpperCAmelCase : List[str] = image_processing(UpperCAmelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def a_ ( self : Tuple ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
# Test not batched input
_UpperCAmelCase : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
_UpperCAmelCase : Union[str, Any] = image_processing(UpperCAmelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 416 |
def solution( n : int = 1_000 ) -> int:
    '''Return the product a*b*c of the Pythagorean triplet with a + b + c == n.'''
    product = -1
    candidate = 0
    for a in range(1 , n // 3 ):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
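# For the classic n = 1000 the unique triplet is (200, 375, 425),
# so solution() returns 200 * 375 * 425 = 31875000.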
if __name__ == "__main__":
print(F"""{solution() = }""")
| 416 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
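# The _LazyModule pattern defers the heavy torch import until an attribute
# such as `TrOCRForCausalLM` is actually accessed, keeping `import transformers` fast.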
| 109 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
a = logging.get_logger(__name__)
class __a ( _snake_case ):
__UpperCamelCase : List[str] = ['input_features', 'is_longer']
def __init__( self : Union[str, Any] ,lowerCamelCase : Union[str, Any]=64 ,lowerCamelCase : Any=4_8000 ,lowerCamelCase : int=480 ,lowerCamelCase : Any=10 ,lowerCamelCase : Dict=1024 ,lowerCamelCase : Union[str, Any]=0.0 ,lowerCamelCase : int=False ,lowerCamelCase : float = 0 ,lowerCamelCase : float = 1_4000 ,lowerCamelCase : int = None ,lowerCamelCase : str = "fusion" ,lowerCamelCase : str = "repeatpad" ,**lowerCamelCase : Dict ,):
'''simple docstring'''
super().__init__(
feature_size=lowerCamelCase ,sampling_rate=lowerCamelCase ,padding_value=lowerCamelCase ,return_attention_mask=lowerCamelCase ,**lowerCamelCase ,)
__SCREAMING_SNAKE_CASE = top_db
__SCREAMING_SNAKE_CASE = truncation
__SCREAMING_SNAKE_CASE = padding
__SCREAMING_SNAKE_CASE = fft_window_size
__SCREAMING_SNAKE_CASE = (fft_window_size >> 1) + 1
__SCREAMING_SNAKE_CASE = hop_length
__SCREAMING_SNAKE_CASE = max_length_s
__SCREAMING_SNAKE_CASE = max_length_s * sampling_rate
__SCREAMING_SNAKE_CASE = sampling_rate
__SCREAMING_SNAKE_CASE = frequency_min
__SCREAMING_SNAKE_CASE = frequency_max
__SCREAMING_SNAKE_CASE = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=lowerCamelCase ,min_frequency=lowerCamelCase ,max_frequency=lowerCamelCase ,sampling_rate=lowerCamelCase ,norm=lowerCamelCase ,mel_scale="""htk""" ,)
__SCREAMING_SNAKE_CASE = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=lowerCamelCase ,min_frequency=lowerCamelCase ,max_frequency=lowerCamelCase ,sampling_rate=lowerCamelCase ,norm="""slaney""" ,mel_scale="""slaney""" ,)
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
__SCREAMING_SNAKE_CASE = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def UpperCAmelCase__ ( self : str ,lowerCamelCase : np.array ,lowerCamelCase : Optional[np.array] = None ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = spectrogram(
lowerCamelCase ,window_function(self.fft_window_size ,"""hann""" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=lowerCamelCase ,log_mel="""dB""" ,)
return log_mel_spectrogram.T
def UpperCAmelCase__ ( self : int ,lowerCamelCase : int ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
__SCREAMING_SNAKE_CASE = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
__SCREAMING_SNAKE_CASE = [0]
# randomly choose index for each part
__SCREAMING_SNAKE_CASE = np.random.choice(ranges[0] )
__SCREAMING_SNAKE_CASE = np.random.choice(ranges[1] )
__SCREAMING_SNAKE_CASE = np.random.choice(ranges[2] )
__SCREAMING_SNAKE_CASE = mel[idx_front : idx_front + chunk_frames, :]
__SCREAMING_SNAKE_CASE = mel[idx_middle : idx_middle + chunk_frames, :]
__SCREAMING_SNAKE_CASE = mel[idx_back : idx_back + chunk_frames, :]
__SCREAMING_SNAKE_CASE = torch.tensor(mel[None, None, :] )
__SCREAMING_SNAKE_CASE = torch.nn.functional.interpolate(
lowerCamelCase ,size=[chunk_frames, 64] ,mode="""bilinear""" ,align_corners=lowerCamelCase )
__SCREAMING_SNAKE_CASE = mel_shrink[0][0].numpy()
__SCREAMING_SNAKE_CASE = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 )
return mel_fusion
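    # The fused output stacks four 2-D mels into shape (4, chunk_frames, n_mels):
    # one globally shrunk view plus random front/middle/back crops, which is the
    # CLAP "fusion" strategy for audio longer than max_length.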
def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : np.array ,lowerCamelCase : str ,lowerCamelCase : Tuple ,lowerCamelCase : Optional[Any] ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
__SCREAMING_SNAKE_CASE = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
__SCREAMING_SNAKE_CASE = len(lowerCamelCase ) - max_length
__SCREAMING_SNAKE_CASE = np.random.randint(0 ,overflow + 1 )
__SCREAMING_SNAKE_CASE = waveform[idx : idx + max_length]
__SCREAMING_SNAKE_CASE = self._np_extract_fbank_features(lowerCamelCase ,self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
__SCREAMING_SNAKE_CASE = self._np_extract_fbank_features(lowerCamelCase ,self.mel_filters )
__SCREAMING_SNAKE_CASE = max_length // self.hop_length + 1 # the +1 relates to how the spectrogram is computed
__SCREAMING_SNAKE_CASE = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
__SCREAMING_SNAKE_CASE = np.stack([mel, mel, mel, mel] ,axis=0 )
__SCREAMING_SNAKE_CASE = False
else:
__SCREAMING_SNAKE_CASE = self._random_mel_fusion(lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = True
else:
raise NotImplementedError(f"""data_truncating {truncation} not implemented""" )
else:
__SCREAMING_SNAKE_CASE = False
# "repeat" is only used as a new possible value for padding: the audio is repeated before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
__SCREAMING_SNAKE_CASE = int(max_length / len(lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = np.stack(np.tile(lowerCamelCase ,n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
__SCREAMING_SNAKE_CASE = int(max_length / len(lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = np.stack(np.tile(lowerCamelCase ,lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = np.pad(lowerCamelCase ,(0, max_length - waveform.shape[0]) ,mode="""constant""" ,constant_values=0 )
if truncation == "fusion":
__SCREAMING_SNAKE_CASE = self._np_extract_fbank_features(lowerCamelCase ,self.mel_filters )
__SCREAMING_SNAKE_CASE = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
else:
__SCREAMING_SNAKE_CASE = self._np_extract_fbank_features(lowerCamelCase ,self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Optional[Any] ,lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,lowerCamelCase : str = None ,lowerCamelCase : Optional[str] = None ,lowerCamelCase : Optional[int] = None ,lowerCamelCase : Optional[int] = None ,lowerCamelCase : Optional[Union[str, TensorType]] = None ,**lowerCamelCase : str ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = truncation if truncation is not None else self.truncation
__SCREAMING_SNAKE_CASE = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
__SCREAMING_SNAKE_CASE = isinstance(lowerCamelCase ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
__SCREAMING_SNAKE_CASE = is_batched_numpy or (
isinstance(lowerCamelCase ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
__SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase ,dtype=np.float64 ) for speech in raw_speech] # np.float64 assumed; the dtype suffix is unreadable in the source
elif not is_batched and not isinstance(lowerCamelCase ,np.ndarray ):
__SCREAMING_SNAKE_CASE = np.asarray(lowerCamelCase ,dtype=np.float64 )
elif isinstance(lowerCamelCase ,np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
__SCREAMING_SNAKE_CASE = raw_speech.astype(np.float64 )
# always return batch
if not is_batched:
__SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase )]
# convert to mel spectrogram, truncate and pad if needed.
__SCREAMING_SNAKE_CASE = [
self._get_input_mel(lowerCamelCase ,max_length if max_length else self.nb_max_samples ,lowerCamelCase ,lowerCamelCase )
for waveform in raw_speech
]
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
for mel, longer in padded_inputs:
input_mel.append(lowerCamelCase )
is_longer.append(lowerCamelCase )
if truncation == "fusion" and sum(lowerCamelCase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
__SCREAMING_SNAKE_CASE = np.random.randint(0 ,len(lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = True
if isinstance(input_mel[0] ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase ,dtype=np.float64 ) for feature in input_mel]
# is_longer is a list of bool
__SCREAMING_SNAKE_CASE = [[longer] for longer in is_longer]
__SCREAMING_SNAKE_CASE = {"""input_features""": input_mel, """is_longer""": is_longer}
__SCREAMING_SNAKE_CASE = BatchFeature(lowerCamelCase )
if return_tensors is not None:
__SCREAMING_SNAKE_CASE = input_features.convert_to_tensors(lowerCamelCase )
return input_features
| 109 | 1 |
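# A minimal NumPy sketch of the "repeatpad" step used by the extractor above:
# the short clip is tiled, then zero-padded up to max_length. The name
# `repeat_pad` is illustrative, not the module's API.
import numpy as np

def repeat_pad(waveform: np.ndarray, max_length: int) -> np.ndarray:
    """Tile a short waveform, then zero-pad the result up to max_length."""
    n_repeat = max_length // len(waveform)  # whole repeats that fit
    tiled = np.tile(waveform, n_repeat)
    pad = max_length - tiled.shape[0]  # remaining samples, filled with zeros
    return np.pad(tiled, (0, pad), mode="constant", constant_values=0)

print(repeat_pad(np.ones(3, dtype=np.float32), 10))  # [1. 1. 1. 1. 1. 1. 1. 1. 1. 0.]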
"""simple docstring"""
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
UpperCAmelCase__ =datasets.logging.get_logger(__name__)
UpperCAmelCase__ ="\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n"
UpperCAmelCase__ ="\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project's README at https://github.com/google-research/bleurt#readme for more information.\n"
UpperCAmelCase__ ="\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n 'scores': List of scores.\nExamples:\n\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> bleurt = datasets.load_metric(\"bleurt\")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [1.03, 1.04]\n"
UpperCAmelCase__ ={
"bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
"bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
"bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
"bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
"bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
"bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
"BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
"BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
"BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
"BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/google-research/bleurt""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/google-research/bleurt"""] , reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] , )
def SCREAMING_SNAKE_CASE_ ( self : str , A_ : Optional[Any] ):
'''simple docstring'''
if self.config_name == "default":
logger.warning(
"""Using default BLEURT-Base checkpoint for sequence maximum length 128. """
"""You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').""" )
__lowercase = """bleurt-base-128"""
if self.config_name.lower() in CHECKPOINT_URLS:
__lowercase = self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
__lowercase = self.config_name.upper()
else:
raise KeyError(
F'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' )
# download the model checkpoint specified by self.config_name and set up the scorer
__lowercase = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
__lowercase = score.BleurtScorer(os.path.join(A_ , A_ ) )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , A_ : Dict , A_ : int ):
'''simple docstring'''
__lowercase = self.scorer.score(references=A_ , candidates=A_ )
return {"scores": scores}
| 442 |
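# A standalone sketch of the checkpoint resolution performed above: the
# metric's config_name is matched against CHECKPOINT_URLS case-insensitively,
# with "default" falling back to bleurt-base-128. The dictionary is trimmed
# here to two entries copied from the sample.
CHECKPOINT_URLS = {
    "bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
    "BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}

def resolve_checkpoint(config_name: str) -> str:
    if config_name == "default":
        config_name = "bleurt-base-128"
    if config_name.lower() in CHECKPOINT_URLS:
        return CHECKPOINT_URLS[config_name.lower()]
    if config_name.upper() in CHECKPOINT_URLS:
        return CHECKPOINT_URLS[config_name.upper()]
    raise KeyError(f"{config_name} model not found in {list(CHECKPOINT_URLS)}")

print(resolve_checkpoint("bleurt-20"))  # matched via the upper-cased key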
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ =logging.get_logger(__name__)
UpperCAmelCase__ ={
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class lowerCamelCase__ ( _a ):
a : List[Any] = """swinv2"""
a : Any = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : List[str] , A_ : Any=2_2_4 , A_ : int=4 , A_ : Optional[int]=3 , A_ : List[Any]=9_6 , A_ : List[Any]=[2, 2, 6, 2] , A_ : List[str]=[3, 6, 1_2, 2_4] , A_ : Union[str, Any]=7 , A_ : int=4.0 , A_ : List[str]=True , A_ : str=0.0 , A_ : Any=0.0 , A_ : Union[str, Any]=0.1 , A_ : Optional[Any]="gelu" , A_ : int=False , A_ : List[Any]=0.02 , A_ : Tuple=1e-5 , A_ : Tuple=3_2 , **A_ : int , ):
'''simple docstring'''
super().__init__(**A_ )
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = embed_dim
__lowercase = depths
__lowercase = len(depths )
__lowercase = num_heads
__lowercase = window_size
__lowercase = mlp_ratio
__lowercase = qkv_bias
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = drop_path_rate
__lowercase = hidden_act
__lowercase = use_absolute_embeddings
__lowercase = layer_norm_eps
__lowercase = initializer_range
__lowercase = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__lowercase = int(embed_dim * 2 ** (len(depths ) - 1) )
__lowercase = (0, 0, 0, 0)
| 442 | 1 |
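# A quick check of the hidden-size formula above: with the default
# embed_dim=96 and depths=[2, 2, 6, 2], the channel count doubles at each of
# the three stage transitions.
embed_dim = 96
depths = [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
print(hidden_size)  # 768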
"""simple docstring"""
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase = 256
class UpperCamelCase__ ( UpperCamelCase__ ):
"""simple docstring"""
A__ : Any = ["melgan"]
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) -> None:
super().__init__()
# From MELGAN
A__ = math.log(1e-5 ) # Matches MelGAN training.
A__ = 4.0 # Largest value for most examples
A__ = 128
self.register_modules(
notes_encoder=snake_case_ , continuous_encoder=snake_case_ , decoder=snake_case_ , scheduler=snake_case_ , melgan=snake_case_ , )
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=(-1.0, 1.0) , SCREAMING_SNAKE_CASE__=False ) -> Union[str, Any]:
A__ = output_range
if clip:
A__ = torch.clip(snake_case_ , self.min_value , self.max_value )
# Scale to [0, 1].
A__ = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=(-1.0, 1.0) , SCREAMING_SNAKE_CASE__=False ) -> Any:
A__ = input_range
A__ = torch.clip(snake_case_ , snake_case_ , snake_case_ ) if clip else outputs
# Scale to [0, 1].
A__ = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[str]:
A__ = input_tokens > 0
A__ = self.notes_encoder(
encoder_input_tokens=snake_case_ , encoder_inputs_mask=snake_case_ )
A__ = self.continuous_encoder(
encoder_inputs=snake_case_ , encoder_inputs_mask=snake_case_ )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
A__ = noise_time
if not torch.is_tensor(snake_case_ ):
A__ = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(snake_case_ ) and len(timesteps.shape ) == 0:
A__ = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
A__ = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
A__ = self.decoder(
encodings_and_masks=snake_case_ , decoder_input_tokens=snake_case_ , decoder_noise_time=snake_case_ )
return logits
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 100 , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = "numpy" , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(snake_case_ , snake_case_ ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(snake_case_ )}.""" )
A__ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.float32 )
A__ = np.zeros([1, 0, self.n_dims] , np.float32 )
A__ = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=bool , device=self.device )
for i, encoder_input_tokens in enumerate(snake_case_ ):
if i == 0:
A__ = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
A__ = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=bool , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
A__ = ones
A__ = self.scale_features(
snake_case_ , output_range=[-1.0, 1.0] , clip=snake_case_ )
A__ = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=snake_case_ , continuous_mask=snake_case_ , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
A__ = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=snake_case_ , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(snake_case_ )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
A__ = self.decode(
encodings_and_masks=snake_case_ , input_tokens=snake_case_ , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
A__ = self.scheduler.step(snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ ).prev_sample
A__ = self.scale_to_features(snake_case_ , input_range=[-1.0, 1.0] )
A__ = mel[:1]
A__ = mel.cpu().float().numpy()
A__ = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(snake_case_ , snake_case_ )
logger.info("Generated segment" , snake_case_ )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'." )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'." )
if output_type == "numpy":
A__ = self.melgan(input_features=full_pred_mel.astype(np.float32 ) )
else:
A__ = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=snake_case_ )
| 104 |
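# scale_features/scale_to_features above are inverse affine maps between the
# MelGAN log-magnitude range [log(1e-5), 4.0] and the model's [-1, 1]. A NumPy
# sketch of the round trip, with the constants copied from the pipeline's
# __init__:
import math
import numpy as np

MIN_VALUE = math.log(1e-5)  # matches MelGAN training
MAX_VALUE = 4.0

def scale_features(x, min_out=-1.0, max_out=1.0):
    zero_one = (x - MIN_VALUE) / (MAX_VALUE - MIN_VALUE)  # -> [0, 1]
    return zero_one * (max_out - min_out) + min_out  # -> [min_out, max_out]

def scale_to_features(y, min_in=-1.0, max_in=1.0):
    zero_one = (y - min_in) / (max_in - min_in)  # -> [0, 1]
    return zero_one * (MAX_VALUE - MIN_VALUE) + MIN_VALUE  # -> original range

x = np.array([MIN_VALUE, 0.0, MAX_VALUE])
assert np.allclose(scale_to_features(scale_features(x)), x)  # exact inverse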
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_A = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 258 | 0 |
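# The module above defers heavy imports until an attribute is first touched.
# A small, library-independent sketch of the same lazy-module idea (this is
# not transformers' _LazyModule, just the pattern it implements):
import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve attributes to submodule members on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map attribute name -> module that actually defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(attr)
        value = getattr(importlib.import_module(module_name), attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value

lazy = LazyModule("lazy", {"json": ["dumps"], "math": ["tau"]})
print(lazy.dumps({"ok": True}), lazy.tau)  # json/math are imported only here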
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__snake_case = logging.get_logger(__name__)
class _a ( __a ):
"""simple docstring"""
def __init__( self : Dict , lowercase_ : int , lowercase_ : int , lowercase_ : float , **lowercase_ : Dict ):
'''simple docstring'''
lowercase_ = feature_size
lowercase_ = sampling_rate
lowercase_ = padding_value
lowercase_ = kwargs.pop("""padding_side""" , """right""" )
lowercase_ = kwargs.pop("""return_attention_mask""" , lowercase_ )
super().__init__(**lowercase_ )
def lowerCamelCase__ ( self : List[Any] , lowercase_ : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , lowercase_ : Union[bool, str, PaddingStrategy] = True , lowercase_ : Optional[int] = None , lowercase_ : bool = False , lowercase_ : Optional[int] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[Union[str, TensorType]] = None , ):
'''simple docstring'''
if isinstance(lowercase_ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
lowercase_ = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"""You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
F""" to this method that includes {self.model_input_names[0]}, but you provided"""
F""" {list(processed_features.keys() )}""" )
lowercase_ = processed_features[self.model_input_names[0]]
lowercase_ = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowercase_ ) == 0:
if return_attention_mask:
lowercase_ = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
lowercase_ = required_input[0]
if isinstance(lowercase_ , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases, so we grab the first non-empty element.
lowercase_ = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowercase_ ):
lowercase_ = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowercase_ ):
lowercase_ = """tf"""
elif is_torch_tensor(lowercase_ ):
lowercase_ = """pt"""
elif isinstance(lowercase_ , (int, float, list, tuple, np.ndarray) ):
lowercase_ = """np"""
else:
raise ValueError(
F"""type of {first_element} unknown: {type(lowercase_ )}. """
"""Should be one of a python, numpy, pytorch or tensorflow object.""" )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
lowercase_ = to_numpy(lowercase_ )
else:
lowercase_ = [to_numpy(lowercase_ ) for v in value]
# Convert padding_strategy in PaddingStrategy
lowercase_ = self._get_padding_strategies(padding=lowercase_ , max_length=lowercase_ )
lowercase_ = processed_features[self.model_input_names[0]]
lowercase_ = len(lowercase_ )
if not all(len(lowercase_ ) == batch_size for v in processed_features.values() ):
raise ValueError("""Some items in the output dictionary have a different batch size than others.""" )
lowercase_ = []
for i in range(lowercase_ ):
lowercase_ = {k: v[i] for k, v in processed_features.items()}
# truncation
lowercase_ = self._truncate(
lowercase_ , max_length=lowercase_ , pad_to_multiple_of=lowercase_ , truncation=lowercase_ , )
truncated_inputs.append(lowercase_ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
lowercase_ = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
lowercase_ = PaddingStrategy.MAX_LENGTH
lowercase_ = {}
for i in range(lowercase_ ):
# padding
lowercase_ = self._pad(
truncated_inputs[i] , max_length=lowercase_ , padding_strategy=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , )
for key, value in outputs.items():
if key not in batch_outputs:
lowercase_ = []
if value.dtype is np.dtype(np.float64 ):
lowercase_ = value.astype(np.float32 ) # downcast float64 features to float32
batch_outputs[key].append(lowercase_ )
return BatchFeature(lowercase_ , tensor_type=lowercase_ )
def lowerCamelCase__ ( self : Any , lowercase_ : Union[Dict[str, np.ndarray], BatchFeature] , lowercase_ : Optional[int] = None , lowercase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowercase_ : Optional[int] = None , lowercase_ : Optional[bool] = None , ):
'''simple docstring'''
lowercase_ = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
lowercase_ = len(lowercase_ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
lowercase_ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
lowercase_ = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowercase_ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
lowercase_ = np.ones(len(lowercase_ ) , dtype=np.int32 )
if needs_to_be_padded:
lowercase_ = max_length - len(lowercase_ )
if self.padding_side == "right":
if return_attention_mask:
lowercase_ = np.pad(
processed_features["""attention_mask"""] , (0, difference) )
lowercase_ = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
lowercase_ = np.pad(
lowercase_ , lowercase_ , """constant""" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
lowercase_ = np.pad(
processed_features["""attention_mask"""] , (difference, 0) )
lowercase_ = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
lowercase_ = np.pad(
lowercase_ , lowercase_ , """constant""" , constant_values=self.padding_value )
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return processed_features
def lowerCamelCase__ ( self : List[Any] , lowercase_ : Union[Dict[str, np.ndarray], BatchFeature] , lowercase_ : Optional[int] = None , lowercase_ : Optional[int] = None , lowercase_ : Optional[bool] = None , ):
'''simple docstring'''
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" )
lowercase_ = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
lowercase_ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
lowercase_ = len(lowercase_ ) > max_length
if needs_to_be_truncated:
lowercase_ = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
lowercase_ = processed_features["""attention_mask"""][:max_length]
return processed_features
def lowerCamelCase__ ( self : List[Any] , lowercase_ : Optional[int]=False , lowercase_ : List[str]=None ):
'''simple docstring'''
if padding is not False:
if padding is True:
lowercase_ = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowercase_ , lowercase_ ):
lowercase_ = PaddingStrategy(lowercase_ )
elif isinstance(lowercase_ , lowercase_ ):
lowercase_ = padding
else:
lowercase_ = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"""Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
""" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" )
return padding_strategy
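# The _pad method above mirrors its right/left padding choice on the attention
# mask. A toy example with feature_size == 1, where the pad width is a plain
# (before, after) tuple:
import numpy as np

values = np.array([0.5, 0.7, 0.9], dtype=np.float32)
mask = np.ones(len(values), dtype=np.int32)
difference = 2  # max_length - current length

right = np.pad(values, (0, difference), "constant", constant_values=0.0)
left = np.pad(values, (difference, 0), "constant", constant_values=0.0)
print(right, np.pad(mask, (0, difference)))  # [0.5 0.7 0.9 0.  0. ] [1 1 1 0 0]
print(left, np.pad(mask, (difference, 0)))   # [0.  0.  0.5 0.7 0.9] [0 0 1 1 1]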
| 603 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def A_ ( SCREAMING_SNAKE_CASE_ ) ->Dict:
config.addinivalue_line(
"""markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" )
config.addinivalue_line(
"""markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" )
config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" )
config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" )
config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" )
config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" )
def A_ ( SCREAMING_SNAKE_CASE_ ) ->str:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(SCREAMING_SNAKE_CASE_ )
def A_ ( SCREAMING_SNAKE_CASE_ ) ->Optional[Any]:
from transformers.testing_utils import pytest_terminal_summary_main
lowercase_ = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(SCREAMING_SNAKE_CASE_ , id=SCREAMING_SNAKE_CASE_ )
def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->Dict:
# If no tests are collected, pytest exits with code 5, which makes the CI fail.
if exitstatus == 5:
lowercase_ = 0
# Doctest custom flag to ignore output.
__snake_case = doctest.register_optionflag("""IGNORE_RESULT""")
__snake_case = doctest.OutputChecker
class _a ( __a ):
"""simple docstring"""
def lowerCamelCase__ ( self : Dict , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : Dict ):
'''simple docstring'''
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , lowercase_ , lowercase_ , lowercase_ )
__snake_case = CustomOutputChecker
__snake_case = HfDoctestModule
__snake_case = HfDocTestParser
| 603 | 1 |
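# A self-contained version of the doctest hook registered above: a custom
# IGNORE_RESULT flag makes the checker accept any output for flagged examples.
import doctest

IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

class IgnoreResultChecker(doctest.OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True  # skip the comparison entirely
        return super().check_output(want, got, optionflags)

source = """
>>> 1 + 1  # doctest: +IGNORE_RESULT
3
"""
test = doctest.DocTestParser().get_doctest(source, {}, "demo", None, 0)
runner = doctest.DocTestRunner(checker=IgnoreResultChecker())
runner.run(test)
print(runner.failures)  # 0: the wrong expected value (3) is ignored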
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ):
'''simple docstring'''
if inductance <= 0:
raise ValueError("""Inductance cannot be 0 or negative""" )
elif capacitance <= 0:
raise ValueError("""Capacitance cannot be 0 or negative""" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 532 |
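# Plugging numbers into the formula above, f0 = 1 / (2*pi*sqrt(L*C)):
# a 10 mH inductor with a 100 nF capacitor resonates near 5.03 kHz.
from math import pi, sqrt

inductance = 10e-3    # henries
capacitance = 100e-9  # farads
f0 = 1 / (2 * pi * sqrt(inductance * capacitance))
print(f"{f0:.1f} Hz")  # ~5032.9 Hz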
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
lowerCAmelCase = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
lowerCAmelCase = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
lowerCAmelCase = s_dict.pop(SCREAMING_SNAKE_CASE )
elif "subsample" in key:
lowerCAmelCase = s_dict.pop(SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase = emb.weight.shape
lowerCAmelCase = nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bias=False )
lowerCAmelCase = emb.weight.data
return lin_layer
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE , map_location="""cpu""" )
lowerCAmelCase = mam_aaa["""args"""]
lowerCAmelCase = mam_aaa["""model"""]
lowerCAmelCase = state_dict["""decoder.output_projection.weight"""]
remove_ignore_keys_(SCREAMING_SNAKE_CASE )
rename_keys(SCREAMING_SNAKE_CASE )
lowerCAmelCase = state_dict["""decoder.embed_tokens.weight"""].shape[0]
lowerCAmelCase = args.share_decoder_input_output_embed
lowerCAmelCase = [int(SCREAMING_SNAKE_CASE ) for i in args.conv_kernel_sizes.split(""",""" )]
lowerCAmelCase = SpeechaTextConfig(
vocab_size=SCREAMING_SNAKE_CASE , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , num_conv_layers=len(SCREAMING_SNAKE_CASE ) , conv_channels=args.conv_channels , conv_kernel_sizes=SCREAMING_SNAKE_CASE , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=SCREAMING_SNAKE_CASE , num_beams=5 , max_length=2_00 , use_cache=SCREAMING_SNAKE_CASE , decoder_start_token_id=2 , early_stopping=SCREAMING_SNAKE_CASE , )
lowerCAmelCase = SpeechaTextForConditionalGeneration(SCREAMING_SNAKE_CASE )
lowerCAmelCase , lowerCAmelCase = model.model.load_state_dict(SCREAMING_SNAKE_CASE , strict=SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0 and not set(SCREAMING_SNAKE_CASE ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"""Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
F' but all the following weights are missing {missing}' )
if tie_embeds:
lowerCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
lowerCAmelCase = lm_head_weights
model.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 532 | 1 |
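# The make_linear_from_emb helper above ties the LM head to the embedding by
# sharing the weight tensor. A minimal demonstration that the two modules
# really share storage (sizes here are illustrative):
import torch
from torch import nn

emb = nn.Embedding(num_embeddings=10, embedding_dim=4)
vocab_size, emb_size = emb.weight.shape
lm_head = nn.Linear(emb_size, vocab_size, bias=False)
lm_head.weight.data = emb.weight.data  # tie: both point at one tensor

emb.weight.data[0] = 1.0  # edit the embedding in place
assert torch.equal(lm_head.weight[0], emb.weight[0])  # the head sees the edit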
from __future__ import annotations
from collections import namedtuple
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> tuple:
"""simple docstring"""
__UpperCamelCase = namedtuple('''result''' , '''name value''' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 375 |
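# A readable restatement of the helper above, which solves P = V * I for the
# one argument passed as 0 (the sample's identifiers are obfuscated; these
# names are assumed):
from collections import namedtuple

Result = namedtuple("Result", "name value")

def electric_power(voltage: float, current: float, power: float) -> Result:
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    if power < 0:
        raise ValueError("Power cannot be negative in any electrical/electronics system")
    if voltage == 0:
        return Result("voltage", power / current)
    if current == 0:
        return Result("current", power / voltage)
    return Result("power", float(round(abs(voltage * current), 2)))

print(electric_power(voltage=0, current=2, power=10))  # Result(name='voltage', value=5.0)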
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
a_ = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
a_ = get_tests_dir("fixtures/vocab.json")
a_ = get_tests_dir("fixtures")
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def snake_case ( self : Optional[Any] ):
__UpperCamelCase = 0
def snake_case ( self : Tuple ):
__UpperCamelCase = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(snake_case , snake_case )
def snake_case ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase = WavaVecaConfig()
__UpperCamelCase = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(snake_case )
processor.save_pretrained(snake_case )
__UpperCamelCase = AutoProcessor.from_pretrained(snake_case )
self.assertIsInstance(snake_case , snake_case )
def snake_case ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(snake_case , os.path.join(snake_case , snake_case ) )
copyfile(snake_case , os.path.join(snake_case , '''vocab.json''' ) )
__UpperCamelCase = AutoProcessor.from_pretrained(snake_case )
self.assertIsInstance(snake_case , snake_case )
def snake_case ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase = WavaVecaFeatureExtractor()
__UpperCamelCase = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
__UpperCamelCase = WavaVecaProcessor(snake_case , snake_case )
# save in new folder
processor.save_pretrained(snake_case )
# drop `processor_class` in tokenizer
with open(os.path.join(snake_case , snake_case ) , '''r''' ) as f:
__UpperCamelCase = json.load(snake_case )
config_dict.pop('''processor_class''' )
with open(os.path.join(snake_case , snake_case ) , '''w''' ) as f:
f.write(json.dumps(snake_case ) )
__UpperCamelCase = AutoProcessor.from_pretrained(snake_case )
self.assertIsInstance(snake_case , snake_case )
def snake_case ( self : Tuple ):
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase = WavaVecaFeatureExtractor()
__UpperCamelCase = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
__UpperCamelCase = WavaVecaProcessor(snake_case , snake_case )
# save in new folder
processor.save_pretrained(snake_case )
# drop `processor_class` in feature extractor
with open(os.path.join(snake_case , snake_case ) , '''r''' ) as f:
__UpperCamelCase = json.load(snake_case )
config_dict.pop('''processor_class''' )
with open(os.path.join(snake_case , snake_case ) , '''w''' ) as f:
f.write(json.dumps(snake_case ) )
__UpperCamelCase = AutoProcessor.from_pretrained(snake_case )
self.assertIsInstance(snake_case , snake_case )
def snake_case ( self : Dict ):
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(snake_case )
# copy relevant files
copyfile(snake_case , os.path.join(snake_case , '''vocab.json''' ) )
# create empty sample processor
with open(os.path.join(snake_case , snake_case ) , '''w''' ) as f:
f.write('''{}''' )
__UpperCamelCase = AutoProcessor.from_pretrained(snake_case )
self.assertIsInstance(snake_case , snake_case )
def snake_case ( self : int ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(snake_case ):
__UpperCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case ):
__UpperCamelCase = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=snake_case )
__UpperCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=snake_case )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
__UpperCamelCase = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
__UpperCamelCase = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
__UpperCamelCase = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=snake_case , use_fast=snake_case )
__UpperCamelCase = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def snake_case ( self : List[Any] ):
try:
AutoConfig.register('''custom''' , snake_case )
AutoFeatureExtractor.register(snake_case , snake_case )
AutoTokenizer.register(snake_case , slow_tokenizer_class=snake_case )
AutoProcessor.register(snake_case , snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case ):
AutoProcessor.register(snake_case , snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
__UpperCamelCase = CustomFeatureExtractor.from_pretrained(snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCamelCase = os.path.join(snake_case , '''vocab.txt''' )
with open(snake_case , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
__UpperCamelCase = CustomTokenizer(snake_case )
__UpperCamelCase = CustomProcessor(snake_case , snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(snake_case )
__UpperCamelCase = AutoProcessor.from_pretrained(snake_case )
self.assertIsInstance(snake_case , snake_case )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def snake_case ( self : Optional[int] ):
class _lowerCamelCase ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = False
class _lowerCamelCase ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = False
class _lowerCamelCase ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCAmelCase__ : Any = "AutoFeatureExtractor"
lowerCAmelCase__ : str = "AutoTokenizer"
lowerCAmelCase__ : str = False
try:
AutoConfig.register('''custom''' , snake_case )
AutoFeatureExtractor.register(snake_case , snake_case )
AutoTokenizer.register(snake_case , slow_tokenizer_class=snake_case )
AutoProcessor.register(snake_case , snake_case )
# If remote code is not set, the default is to use local classes.
__UpperCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
__UpperCamelCase = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=snake_case )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
__UpperCamelCase = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=snake_case )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def snake_case ( self : Optional[int] ):
__UpperCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def snake_case ( self : str ):
__UpperCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ : int = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def snake_case ( cls : List[str] ):
__UpperCamelCase = TOKEN
HfFolder.save_token(snake_case )
@classmethod
def snake_case ( cls : Union[str, Any] ):
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def snake_case ( self : int ):
__UpperCamelCase = WavaVecaProcessor.from_pretrained(snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(snake_case , '''test-processor''' ) , push_to_hub=snake_case , use_auth_token=self._token )
__UpperCamelCase = WavaVecaProcessor.from_pretrained(F"{USER}/test-processor" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(snake_case , getattr(new_processor.feature_extractor , snake_case ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def snake_case ( self : Any ):
__UpperCamelCase = WavaVecaProcessor.from_pretrained(snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(snake_case , '''test-processor-org''' ) , push_to_hub=snake_case , use_auth_token=self._token , organization='''valid_org''' , )
__UpperCamelCase = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(snake_case , getattr(new_processor.feature_extractor , snake_case ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def snake_case ( self : List[str] ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
__UpperCamelCase = CustomFeatureExtractor.from_pretrained(snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCamelCase = os.path.join(snake_case , '''vocab.txt''' )
with open(snake_case , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
__UpperCamelCase = CustomTokenizer(snake_case )
__UpperCamelCase = CustomProcessor(snake_case , snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"{USER}/test-dynamic-processor" , token=self._token )
__UpperCamelCase = Repository(snake_case , clone_from=F"{USER}/test-dynamic-processor" , token=self._token )
processor.save_pretrained(snake_case )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(snake_case , '''tokenizer_config.json''' ) ) as f:
__UpperCamelCase = json.load(snake_case )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(snake_case , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(snake_case , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(snake_case , '''custom_processing.py''' ) ) )
repo.push_to_hub()
__UpperCamelCase = AutoProcessor.from_pretrained(F"{USER}/test-dynamic-processor" , trust_remote_code=snake_case )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
| 375 | 1 |
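# The tests above exercise an Auto-class registry keyed on config types, where
# registering an already-taken key raises. A minimal, library-independent
# sketch of that dispatch pattern (not transformers' actual implementation):
class AutoRegistry:
    def __init__(self):
        self._mapping = {}

    def register(self, config_cls, impl_cls):
        if config_cls in self._mapping:
            raise ValueError(f"{config_cls.__name__} is already registered")
        self._mapping[config_cls] = impl_cls

    def from_config(self, config):
        # dispatch on the concrete config type, as AutoProcessor does
        return self._mapping[type(config)]()

class CustomConfig: ...
class CustomProcessor: ...

registry = AutoRegistry()
registry.register(CustomConfig, CustomProcessor)
print(type(registry.from_config(CustomConfig())).__name__)  # CustomProcessor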
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCamelCase ( snake_case__ , unittest.TestCase ):
'''simple docstring'''
a_ : Optional[int] = LongformerTokenizer
a_ : Any = True
a_ : int = LongformerTokenizerFast
a_ : Union[str, Any] = True
def lowerCamelCase ( self : int ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase_ : str = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowerCAmelCase_ : Optional[Any] = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
lowerCAmelCase_ : str = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCAmelCase_ : int = {'''unk_token''': '''<unk>'''}
lowerCAmelCase_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowercase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__lowercase ) )
def lowerCamelCase ( self : Optional[int] , **a_ : int ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase )
def lowerCamelCase ( self : int , **a_ : str ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase )
def lowerCamelCase ( self : Union[str, Any] , a_ : List[str] ):
lowerCAmelCase_ : List[Any] = '''lower newer'''
lowerCAmelCase_ : Tuple = '''lower newer'''
return input_text, output_text
def lowerCamelCase ( self : Any ):
lowerCAmelCase_ : Optional[int] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCAmelCase_ : List[str] = '''lower newer'''
lowerCAmelCase_ : Optional[int] = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
lowerCAmelCase_ : str = tokenizer.tokenize(__lowercase ) # , add_prefix_space=True)
self.assertListEqual(__lowercase , __lowercase )
lowerCAmelCase_ : Dict = tokens + [tokenizer.unk_token]
lowerCAmelCase_ : str = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , __lowercase )
def lowerCamelCase ( self : Any ):
lowerCAmelCase_ : Union[str, Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=__lowercase ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=__lowercase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def lowerCamelCase ( self : Tuple ):
lowerCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
lowerCAmelCase_ : Optional[int] = tokenizer.encode("sequence builders" , add_special_tokens=__lowercase )
lowerCAmelCase_ : Tuple = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowercase )
lowerCAmelCase_ : Dict = tokenizer.encode(
"sequence builders" , add_special_tokens=__lowercase , add_prefix_space=__lowercase )
lowerCAmelCase_ : Optional[int] = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=__lowercase , add_prefix_space=__lowercase )
lowerCAmelCase_ : Any = tokenizer.build_inputs_with_special_tokens(__lowercase )
lowerCAmelCase_ : int = tokenizer.build_inputs_with_special_tokens(__lowercase , __lowercase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
| 610 |
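Editorial aside, not part of the dataset row: the space-handling behavior the tests above assert can be reproduced in a few lines, assuming the `transformers` package is installed and the `roberta-base` checkpoint (the same byte-level BPE family Longformer uses) can be downloaded.

from transformers import RobertaTokenizer

tok = RobertaTokenizer.from_pretrained("roberta-base")
# Without a prefix space the first word carries no leading "Ġ" byte marker...
print(tok.tokenize("Hello world"))                         # expected: ['Hello', 'Ġworld']
# ...with add_prefix_space=True it does, which shifts ids and offset mappings.
print(tok.tokenize("Hello world", add_prefix_space=True))  # expected: ['ĠHello', 'Ġworld']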
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    """Constructs a ConvNeXT-style image processor."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], crop_pct: float, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 179 | 0 |
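Editorial aside: the resize rule in the processor above is easy to sanity-check on paper. A minimal standalone sketch in plain Python (the helper name `resize_plan` is ours, not part of the class):

def resize_plan(shortest_edge: int, crop_pct: float = 224 / 256) -> str:
    """Mirror the branch in resize(): crop below 384, warp at 384 and above."""
    if shortest_edge < 384:
        resize_to = int(shortest_edge / crop_pct)
        return f"resize shortest edge to {resize_to}, then center-crop to {shortest_edge}x{shortest_edge}"
    return f"warp directly to {shortest_edge}x{shortest_edge}"

print(resize_plan(224))  # resize shortest edge to 256, then center-crop to 224x224
print(resize_plan(384))  # warp directly to 384x384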
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int) -> np.ndarray:
    """Build a (ksize x ksize) Gabor kernel for the given orientation and scale."""
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
UpperCAmelCase_ = imread('''../image_data/lena.jpg''')
# turn image in gray scale value
UpperCAmelCase_ = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
UpperCAmelCase_ = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
UpperCAmelCase_ = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
UpperCAmelCase_ = out / out.max() * 255
UpperCAmelCase_ = out.astype(np.uinta)
imshow('''Original''', gray)
imshow('''Gabor filter with 20x20 mask and 6 directions''', out)
waitKey(0) | 476 | import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 476 | 1 |
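Editorial aside: the Gabor kernel in the row above can be sanity-checked without OpenCV. A numpy-only re-implementation of the same formula (our own standalone copy, named `gabor_kernel_check` to avoid clashing with the dataset cell):

import numpy as np

def gabor_kernel_check(ksize=10, sigma=8, theta=0, lambd=10, gamma=0, psi=0):
    """Standalone copy of the gabor_filter_kernel math for a quick check."""
    if ksize % 2 == 0:
        ksize += 1
    out = np.zeros((ksize, ksize), dtype=np.float32)
    for y in range(ksize):
        for x in range(ksize):
            px, py = x - ksize // 2, y - ksize // 2
            t = theta / 180 * np.pi
            _x = np.cos(t) * px + np.sin(t) * py
            _y = -np.sin(t) * px + np.cos(t) * py
            out[y, x] = np.exp(-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(
                2 * np.pi * _x / lambd + psi
            )
    return out

k = gabor_kernel_check()
assert k.shape == (11, 11)  # an even ksize is bumped to the next odd size
assert np.isclose(k[5, 5], k.max()) and np.isclose(k.max(), 1.0)  # peak of 1 at the center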
'''simple docstring'''
import string


def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing every possible shift."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 150 |
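Editorial aside: a worked example of the brute-force decryption above, using a modulo in place of the snippet's negative-wrap branch (same arithmetic). "HELLO WORLD" shifted forward by 3 gives "KHOOR ZRUOG", so key #3 recovers it:

import string

ciphertext = "KHOOR ZRUOG"  # "HELLO WORLD" shifted forward by 3
for key in range(len(string.ascii_uppercase)):
    translated = ""
    for symbol in ciphertext:
        if symbol in string.ascii_uppercase:
            translated += string.ascii_uppercase[(string.ascii_uppercase.find(symbol) - key) % 26]
        else:
            translated += symbol
    if translated == "HELLO WORLD":
        print(f"Key #{key} recovers the plaintext: {translated}")  # Key #3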
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for the given x[n]."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the magnitude response of a filter from its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the phase response of a filter from its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
| 150 | 1 |
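Editorial aside: the Protocol above only fixes the interface, so any object with a matching process() method can be plotted. A minimal sketch of such a filter — a one-pole low-pass of our own invention — fed an impulse the same way show_frequency_response does, but printing instead of plotting:

import numpy as np

class OnePoleLowPass:
    """Example filter conforming to the process() protocol (not from the dataset)."""

    def __init__(self, alpha: float = 0.1) -> None:
        self.alpha = alpha
        self.prev = 0.0

    def process(self, sample: float) -> float:
        self.prev = self.alpha * sample + (1 - self.alpha) * self.prev
        return self.prev

samplerate = 48_000
filt = OnePoleLowPass()
outputs = [filt.process(s) for s in [1.0] + [0.0] * 511] + [0.0] * (samplerate - 512)
gain_db = 20 * np.log10(np.abs(np.fft.fft(outputs)))
# DC passes nearly unchanged (~0 dB); frequencies near Nyquist are attenuated (negative dB).
print(round(gain_db[1], 1), round(gain_db[samplerate // 2 - 1], 1))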
"""simple docstring"""
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        # remap every pixel through the cumulative-distribution lookup table
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 48 |
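Editorial aside: the class above is a classic cumulative-distribution remap. The same idea fits in a few lines of numpy, shown here as an independent sketch rather than the class's exact arithmetic:

import numpy as np

img = np.random.randint(0, 128, size=(64, 64), dtype=np.uint8)  # deliberately dark image
hist = np.bincount(img.ravel(), minlength=256)
cdf = np.cumsum(hist) / img.size                 # cumulative distribution in [0, 1]
lut = np.round(255 * cdf).astype(np.uint8)       # lookup table, like last_list above
equalized = lut[img]
print(img.max(), "->", equalized.max())          # the dynamic range is stretched toward 255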
"""simple docstring"""
import argparse
import copy
def generate_neighbours(path):
    """Read the weighted edge list at `path` into a dict of neighbours."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    """Build a greedy nearest-neighbour tour as the starting solution."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """Generate all 2-exchange neighbours of `solution`, sorted by total distance."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Run tabu search for `iters` iterations with a tabu list of length `size`."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            # locate the first position where the candidate differs from the current tour
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )
    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )
    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
a_ = argparse.ArgumentParser(description='''Tabu Search''')
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 48 | 1 |
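Editorial aside: the input format is implicit in the parsers above — one whitespace-separated edge per line ("node node distance"), with the file's first character read as the start node. A hedged end-to-end sketch, assuming the functions above are in scope; the 4-city distances are made up, and the tabu list is kept short so a non-tabu neighbour always exists on such a tiny instance:

import tempfile

edges = "a b 20\na c 18\na d 22\nb c 30\nb d 26\nc d 24\n"
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write(edges)
    path = f.name

neighbours = generate_neighbours(path)
first, dist = generate_first_solution(path, neighbours)
best, cost = tabu_search(first, dist, neighbours, 4, 2)  # iters=4, tabu size=2
print(best, cost)  # a tour over a, b, c, d and its total distance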