Dataset schema (column, dtype, observed value range):

    code                      string    length 86 to 54.5k
    code_codestyle            int64     0 to 371
    style_context             string    length 87 to 49.2k
    style_context_codestyle   int64     0 to 349
    label                     int64     0 to 1
"""simple docstring""" import contextlib import importlib import io import unittest import transformers # Try to import everything from transformers to ensure every object can be loaded. from transformers import * # noqa F406 from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available if is_torch_available(): from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification if is_tf_available(): from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification if is_flax_available(): from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification A : Optional[int] = DUMMY_UNKNOWN_IDENTIFIER # An actual model hosted on huggingface.co A : List[str] = "main" # Default branch name A : Union[str, Any] = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2" # One particular commit (not the top of `main`) A : Dict = "aaaaaaa" # This commit does not exist, so we should 404. A : Optional[int] = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684" # Sha-1 of config.json on the top of `main`, for checking purposes A : Optional[Any] = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3" @contextlib.contextmanager def a__ ( ): print("Welcome!" ) yield print("Bye!" ) @contextlib.contextmanager def a__ ( ): print("Bonjour!" ) yield print("Au revoir!" ) class lowerCamelCase (unittest.TestCase ): """simple docstring""" def __A ( self : Dict ) -> Tuple: # If the spec is missing, importlib would not be able to import the module dynamically. assert transformers.__spec__ is not None assert importlib.util.find_spec("transformers" ) is not None class lowerCamelCase (unittest.TestCase ): """simple docstring""" @unittest.mock.patch("sys.stdout" , new_callable=io.StringIO ) def __A ( self : Tuple , __magic_name__ : Union[str, Any] ) -> Optional[int]: with ContextManagers([] ): print("Transformers are awesome!" ) # The print statement adds a new line at the end of the output self.assertEqual(mock_stdout.getvalue() , "Transformers are awesome!\n" ) @unittest.mock.patch("sys.stdout" , new_callable=io.StringIO ) def __A ( self : List[Any] , __magic_name__ : Union[str, Any] ) -> List[str]: with ContextManagers([context_en()] ): print("Transformers are awesome!" ) # The output should be wrapped with an English welcome and goodbye self.assertEqual(mock_stdout.getvalue() , "Welcome!\nTransformers are awesome!\nBye!\n" ) @unittest.mock.patch("sys.stdout" , new_callable=io.StringIO ) def __A ( self : Any , __magic_name__ : Union[str, Any] ) -> Any: with ContextManagers([context_fr(), context_en()] ): print("Transformers are awesome!" 
) # The output should be wrapped with an English and French welcome and goodbye self.assertEqual(mock_stdout.getvalue() , "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n" ) @require_torch def __A ( self : List[str] ) -> int: self.assertEqual(find_labels(__magic_name__ ) , ["labels"] ) self.assertEqual(find_labels(__magic_name__ ) , ["labels", "next_sentence_label"] ) self.assertEqual(find_labels(__magic_name__ ) , ["start_positions", "end_positions"] ) class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" pass self.assertEqual(find_labels(__magic_name__ ) , ["labels"] ) @require_tf def __A ( self : Optional[int] ) -> List[str]: self.assertEqual(find_labels(__magic_name__ ) , ["labels"] ) self.assertEqual(find_labels(__magic_name__ ) , ["labels", "next_sentence_label"] ) self.assertEqual(find_labels(__magic_name__ ) , ["start_positions", "end_positions"] ) class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" pass self.assertEqual(find_labels(__magic_name__ ) , ["labels"] ) @require_flax def __A ( self : Union[str, Any] ) -> Dict: # Flax models don't have labels self.assertEqual(find_labels(__magic_name__ ) , [] ) self.assertEqual(find_labels(__magic_name__ ) , [] ) self.assertEqual(find_labels(__magic_name__ ) , [] ) class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" pass self.assertEqual(find_labels(__magic_name__ ) , [] )
362
import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def a__ ( __UpperCamelCase ): return x + 2 class lowerCamelCase (unittest.TestCase ): """simple docstring""" def __A ( self : List[Any] ) -> int: SCREAMING_SNAKE_CASE_ = "x = 3" SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result == 3 self.assertDictEqual(__magic_name__ , {"x": 3} ) SCREAMING_SNAKE_CASE_ = "x = y" SCREAMING_SNAKE_CASE_ = {"y": 5} SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__magic_name__ , {"x": 5, "y": 5} ) def __A ( self : Union[str, Any] ) -> str: SCREAMING_SNAKE_CASE_ = "y = add_two(x)" SCREAMING_SNAKE_CASE_ = {"x": 3} SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ ) assert result == 5 self.assertDictEqual(__magic_name__ , {"x": 3, "y": 5} ) # Won't work without the tool with CaptureStdout() as out: SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result is None assert "tried to execute add_two" in out.out def __A ( self : List[str] ) -> int: SCREAMING_SNAKE_CASE_ = "x = 3" SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result == 3 self.assertDictEqual(__magic_name__ , {"x": 3} ) def __A ( self : Optional[Any] ) -> str: SCREAMING_SNAKE_CASE_ = "test_dict = {'x': x, 'y': add_two(x)}" SCREAMING_SNAKE_CASE_ = {"x": 3} SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ ) self.assertDictEqual(__magic_name__ , {"x": 3, "y": 5} ) self.assertDictEqual(__magic_name__ , {"x": 3, "test_dict": {"x": 3, "y": 5}} ) def __A ( self : Optional[int] ) -> List[str]: SCREAMING_SNAKE_CASE_ = "x = 3\ny = 5" SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__magic_name__ , {"x": 3, "y": 5} ) def __A ( self : Any ) -> List[str]: SCREAMING_SNAKE_CASE_ = "text = f'This is x: {x}.'" SCREAMING_SNAKE_CASE_ = {"x": 3} SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(__magic_name__ , {"x": 3, "text": "This is x: 3."} ) def __A ( self : int ) -> Tuple: SCREAMING_SNAKE_CASE_ = "if x <= 3:\n y = 2\nelse:\n y = 5" SCREAMING_SNAKE_CASE_ = {"x": 3} SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(__magic_name__ , {"x": 3, "y": 2} ) SCREAMING_SNAKE_CASE_ = {"x": 8} SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(__magic_name__ , {"x": 8, "y": 5} ) def __A ( self : str ) -> str: SCREAMING_SNAKE_CASE_ = "test_list = [x, add_two(x)]" SCREAMING_SNAKE_CASE_ = {"x": 3} SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ ) self.assertListEqual(__magic_name__ , [3, 5] ) self.assertDictEqual(__magic_name__ , {"x": 3, "test_list": [3, 5]} ) def __A ( self : Union[str, Any] ) -> List[Any]: SCREAMING_SNAKE_CASE_ = "y = x" SCREAMING_SNAKE_CASE_ = {"x": 3} SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result == 3 self.assertDictEqual(__magic_name__ , {"x": 3, "y": 3} ) def __A ( self : Tuple ) -> List[Any]: SCREAMING_SNAKE_CASE_ = "test_list = [x, add_two(x)]\ntest_list[1]" SCREAMING_SNAKE_CASE_ = {"x": 3} SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ ) assert result == 5 self.assertDictEqual(__magic_name__ , {"x": 3, "test_list": [3, 5]} ) SCREAMING_SNAKE_CASE_ = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']" SCREAMING_SNAKE_CASE_ = {"x": 3} SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ ) assert result == 5 self.assertDictEqual(__magic_name__ , {"x": 3, "test_dict": {"x": 3, "y": 5}} ) def __A ( self : Tuple ) -> Any: SCREAMING_SNAKE_CASE_ = "x = 0\nfor i in range(3):\n x = i" SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"range": range} , state=__magic_name__ ) assert result == 2 self.assertDictEqual(__magic_name__ , {"x": 2, "i": 2} )
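A minimal direct call of the `evaluate` interpreter that the tests above exercise, mirroring only behaviour the tests themselves assert (this sketch is illustrative and not part of the original test file):

from transformers.tools.python_interpreter import evaluate

def add_two(x):
    return x + 2

# `evaluate` runs the code string against the given tool dict and mutable state,
# and returns the value of the last assignment, exactly as the tests assert.
state = {"x": 3}
result = evaluate("y = add_two(x)", {"add_two": add_two}, state=state)
print(result)  # 5
print(state)   # {'x': 3, 'y': 5}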
305
0
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to fill with a queen.
    row = len(possible_board)

    # If row equals the size of the board, there is a queen in every row of the
    # current board (possible_board), so it is a solution.
    if row == n:
        # Convert possible_board, e.g. [1, 3, 0, 2], to
        # ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # Iterate over each column in the row to find all possible placements.
    for col in range(n):
        # First check that the current board (possible_board) does not already
        # contain this column value; if it does, there is a vertical collision.
        # Then apply the two diagonal formulas:
        #
        #   45º:  row - col = b  (constant along a "/" diagonal)
        #   135º: row + col = b  (constant along a "\" diagonal)
        #
        # and verify that neither result already exists in its collision list
        # (diagonal_right_collisions, diagonal_left_collisions). If any check
        # matches, there is a collision, so continue to the next column.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue
        # Otherwise, recurse with the updated board and collision lists.
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
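A quick standalone check of the column and diagonal collision formulas used above (the helper name `is_safe` is illustrative, not from the original file):

def is_safe(board: list[int], row: int, col: int) -> bool:
    # board[r] holds the column of the queen placed in row r.
    return all(
        col != c                # vertical collision: same column
        and row - col != r - c  # "/" diagonal: row - col is constant
        and row + col != r + c  # "\" diagonal: row + col is constant
        for r, c in enumerate(board)
    )

assert is_safe([1, 3, 0], 3, 2)      # completes the valid 4-queens board [1, 3, 0, 2]
assert not is_safe([1, 3, 0], 3, 0)  # column 0 is already occupied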
363
import numpy as np import pandas as pd from sklearn.preprocessing import Normalizer from sklearn.svm import SVR from statsmodels.tsa.statespace.sarimax import SARIMAX def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): SCREAMING_SNAKE_CASE_ = np.array([[1, item, train_mtch[i]] for i, item in enumerate(__UpperCamelCase )] ) SCREAMING_SNAKE_CASE_ = np.array(__UpperCamelCase ) SCREAMING_SNAKE_CASE_ = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , __UpperCamelCase ) ) , x.transpose() ) , __UpperCamelCase ) return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] ) def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): SCREAMING_SNAKE_CASE_ = (1, 2, 1) SCREAMING_SNAKE_CASE_ = (1, 1, 0, 7) SCREAMING_SNAKE_CASE_ = SARIMAX( __UpperCamelCase , exog=__UpperCamelCase , order=__UpperCamelCase , seasonal_order=__UpperCamelCase ) SCREAMING_SNAKE_CASE_ = model.fit(disp=__UpperCamelCase , maxiter=6_0_0 , method="nm" ) SCREAMING_SNAKE_CASE_ = model_fit.predict(1 , len(__UpperCamelCase ) , exog=[test_match] ) return result[0] def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): SCREAMING_SNAKE_CASE_ = SVR(kernel="rbf" , C=1 , gamma=0.1 , epsilon=0.1 ) regressor.fit(__UpperCamelCase , __UpperCamelCase ) SCREAMING_SNAKE_CASE_ = regressor.predict(__UpperCamelCase ) return y_pred[0] def a__ ( __UpperCamelCase ): train_user.sort() SCREAMING_SNAKE_CASE_ = np.percentile(__UpperCamelCase , 2_5 ) SCREAMING_SNAKE_CASE_ = np.percentile(__UpperCamelCase , 7_5 ) SCREAMING_SNAKE_CASE_ = qa - qa SCREAMING_SNAKE_CASE_ = qa - (iqr * 0.1) return low_lim def a__ ( __UpperCamelCase , __UpperCamelCase ): SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = 0 for i in list_vote: if i > actual_result: SCREAMING_SNAKE_CASE_ = not_safe + 1 else: if abs(abs(__UpperCamelCase ) - abs(__UpperCamelCase ) ) <= 0.1: safe += 1 else: not_safe += 1 return safe > not_safe if __name__ == "__main__": # data_input_df = pd.read_csv("ex_data.csv", header=None) A : Dict = [[1_82_31, 0.0, 1], [2_26_21, 1.0, 2], [1_56_75, 0.0, 3], [2_35_83, 1.0, 4]] A : Optional[Any] = pd.DataFrame( data_input, columns=["total_user", "total_even", "days"] ) A : Union[str, Any] = Normalizer().fit_transform(data_input_df.values) # split data A : Optional[int] = normalize_df[:, 2].tolist() A : List[str] = normalize_df[:, 0].tolist() A : int = normalize_df[:, 1].tolist() # for svr (input variable = total date and total match) A : int = normalize_df[:, [1, 2]].tolist() A : Tuple = x[: len(x) - 1] A : str = x[len(x) - 1 :] # for linear regression & sarimax A : Tuple = total_date[: len(total_date) - 1] A : Optional[int] = total_user[: len(total_user) - 1] A : str = total_match[: len(total_match) - 1] A : List[Any] = total_date[len(total_date) - 1 :] A : List[Any] = total_user[len(total_user) - 1 :] A : Optional[Any] = total_match[len(total_match) - 1 :] # voting system with forecasting A : Optional[int] = [ linear_regression_prediction( trn_date, trn_user, trn_match, tst_date, tst_match ), sarimax_predictor(trn_user, trn_match, tst_match), support_vector_regressor(x_train, x_test, trn_user), ] # check the safety of today's data A : str = "" if data_safety_checker(res_vote, tst_user) else "not " print("Today's data is {not_str}safe.")
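Note that `SCREAMING_SNAKE_CASE_ = qa - qa` in the record above is a name-mangling artifact: both percentiles were assigned to the same identifier. The intended interquartile-range computation is presumably the following (function and variable names here are assumptions):

import numpy as np

def interquartile_range_checker(train_user: list) -> float:
    # Lower safety limit from the interquartile range: q1 - 0.1 * (q3 - q1).
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim

print(interquartile_range_checker([1, 2, 3, 4, 5, 6, 7]))  # 2.2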
305
0
from __future__ import annotations from typing import Any class lowerCamelCase : """simple docstring""" def __init__( self : Optional[int] , __magic_name__ : int = 6 ) -> None: SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = None self.create_linked_list(__magic_name__ ) def __A ( self : Union[str, Any] , __magic_name__ : int ) -> None: SCREAMING_SNAKE_CASE_ = Node() SCREAMING_SNAKE_CASE_ = current_node SCREAMING_SNAKE_CASE_ = current_node SCREAMING_SNAKE_CASE_ = current_node for _ in range(1 , __magic_name__ ): SCREAMING_SNAKE_CASE_ = Node() SCREAMING_SNAKE_CASE_ = current_node SCREAMING_SNAKE_CASE_ = previous_node SCREAMING_SNAKE_CASE_ = current_node SCREAMING_SNAKE_CASE_ = self.front SCREAMING_SNAKE_CASE_ = previous_node def __A ( self : List[str] ) -> bool: return ( self.front == self.rear and self.front is not None and self.front.data is None ) def __A ( self : Dict ) -> Any | None: self.check_can_perform_operation() return self.front.data if self.front else None def __A ( self : Dict , __magic_name__ : Any ) -> None: if self.rear is None: return self.check_is_full() if not self.is_empty(): SCREAMING_SNAKE_CASE_ = self.rear.next if self.rear: SCREAMING_SNAKE_CASE_ = data def __A ( self : int ) -> Any: self.check_can_perform_operation() if self.rear is None or self.front is None: return None if self.front == self.rear: SCREAMING_SNAKE_CASE_ = self.front.data SCREAMING_SNAKE_CASE_ = None return data SCREAMING_SNAKE_CASE_ = self.front SCREAMING_SNAKE_CASE_ = old_front.next SCREAMING_SNAKE_CASE_ = old_front.data SCREAMING_SNAKE_CASE_ = None return data def __A ( self : Tuple ) -> None: if self.is_empty(): raise Exception("Empty Queue" ) def __A ( self : int ) -> None: if self.rear and self.rear.next == self.front: raise Exception("Full Queue" ) class lowerCamelCase : """simple docstring""" def __init__( self : Dict ) -> None: SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = None if __name__ == "__main__": import doctest doctest.testmod()
364
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
305
0
"""simple docstring""" def a__ ( __UpperCamelCase , __UpperCamelCase ): if number < 0 or shift_amount < 0: raise ValueError("both inputs must be positive integers" ) SCREAMING_SNAKE_CASE_ = str(bin(__UpperCamelCase ) ) binary_number += "0" * shift_amount return binary_number def a__ ( __UpperCamelCase , __UpperCamelCase ): if number < 0 or shift_amount < 0: raise ValueError("both inputs must be positive integers" ) SCREAMING_SNAKE_CASE_ = str(bin(__UpperCamelCase ) )[2:] if shift_amount >= len(__UpperCamelCase ): return "0b0" SCREAMING_SNAKE_CASE_ = binary_number[: len(__UpperCamelCase ) - shift_amount] return "0b" + shifted_binary_number def a__ ( __UpperCamelCase , __UpperCamelCase ): if number >= 0: # Get binary representation of positive number SCREAMING_SNAKE_CASE_ = "0" + str(bin(__UpperCamelCase ) ).strip("-" )[2:] else: # Get binary (2's complement) representation of negative number SCREAMING_SNAKE_CASE_ = len(bin(__UpperCamelCase )[3:] ) # Find 2's complement of number SCREAMING_SNAKE_CASE_ = bin(abs(__UpperCamelCase ) - (1 << binary_number_length) )[3:] SCREAMING_SNAKE_CASE_ = ( "1" + "0" * (binary_number_length - len(__UpperCamelCase )) + binary_number ) if shift_amount >= len(__UpperCamelCase ): return "0b" + binary_number[0] * len(__UpperCamelCase ) return ( "0b" + binary_number[0] * shift_amount + binary_number[: len(__UpperCamelCase ) - shift_amount] ) if __name__ == "__main__": import doctest doctest.testmod()
365
import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel A : Union[str, Any] = "0.12" # assumed parallelism: 8 @require_flax @is_staging_test class lowerCamelCase (unittest.TestCase ): """simple docstring""" @classmethod def __A ( cls : Any ) -> Dict: SCREAMING_SNAKE_CASE_ = TOKEN HfFolder.save_token(__magic_name__ ) @classmethod def __A ( cls : Optional[int] ) -> Tuple: try: delete_repo(token=cls._token , repo_id="test-model-flax" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-model-flax-org" ) except HTTPError: pass def __A ( self : str ) -> str: SCREAMING_SNAKE_CASE_ = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) SCREAMING_SNAKE_CASE_ = FlaxBertModel(__magic_name__ ) model.push_to_hub("test-model-flax" , use_auth_token=self._token ) SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' ) SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(model.params ) ) SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): SCREAMING_SNAKE_CASE_ = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__magic_name__ , 1e-3 , msg=F'''{key} not identical''' ) # Reset repo delete_repo(token=self._token , repo_id="test-model-flax" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(__magic_name__ , repo_id="test-model-flax" , push_to_hub=__magic_name__ , use_auth_token=self._token ) SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' ) SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(model.params ) ) SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): SCREAMING_SNAKE_CASE_ = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__magic_name__ , 1e-3 , msg=F'''{key} not identical''' ) def __A ( self : int ) -> Tuple: SCREAMING_SNAKE_CASE_ = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) SCREAMING_SNAKE_CASE_ = FlaxBertModel(__magic_name__ ) model.push_to_hub("valid_org/test-model-flax-org" , use_auth_token=self._token ) SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" ) SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(model.params ) ) SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): SCREAMING_SNAKE_CASE_ = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__magic_name__ , 1e-3 , msg=F'''{key} not identical''' ) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-model-flax-org" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( __magic_name__ , repo_id="valid_org/test-model-flax-org" , push_to_hub=__magic_name__ , use_auth_token=self._token ) SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" ) SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(model.params ) ) SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(new_model.params ) ) for key 
in base_params.keys(): SCREAMING_SNAKE_CASE_ = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__magic_name__ , 1e-3 , msg=F'''{key} not identical''' ) def a__ ( __UpperCamelCase , __UpperCamelCase ): SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = flatten_dict(modela.params ) SCREAMING_SNAKE_CASE_ = flatten_dict(modela.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4: SCREAMING_SNAKE_CASE_ = False return models_are_equal @require_flax class lowerCamelCase (unittest.TestCase ): """simple docstring""" def __A ( self : str ) -> Dict: SCREAMING_SNAKE_CASE_ = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" ) SCREAMING_SNAKE_CASE_ = FlaxBertModel(__magic_name__ ) SCREAMING_SNAKE_CASE_ = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(__magic_name__ , __magic_name__ ) ) with self.assertRaises(__magic_name__ ): SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ ) SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ ) self.assertTrue(check_models_equal(__magic_name__ , __magic_name__ ) ) def __A ( self : Optional[Any] ) -> Tuple: SCREAMING_SNAKE_CASE_ = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" ) SCREAMING_SNAKE_CASE_ = FlaxBertModel(__magic_name__ ) SCREAMING_SNAKE_CASE_ = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(__magic_name__ , __magic_name__ ) , max_shard_size="10KB" ) with self.assertRaises(__magic_name__ ): SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ ) SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ ) self.assertTrue(check_models_equal(__magic_name__ , __magic_name__ ) ) def __A ( self : Optional[int] ) -> Dict: SCREAMING_SNAKE_CASE_ = "bert" SCREAMING_SNAKE_CASE_ = "hf-internal-testing/tiny-random-bert-subfolder" with self.assertRaises(__magic_name__ ): SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ ) SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def __A ( self : List[str] ) -> Dict: SCREAMING_SNAKE_CASE_ = "bert" SCREAMING_SNAKE_CASE_ = "hf-internal-testing/tiny-random-bert-sharded-subfolder" with self.assertRaises(__magic_name__ ): SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ ) SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ ) self.assertIsNotNone(__magic_name__ )
305
0
from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL A : Optional[Any] = logging.get_logger(__name__) def a__ ( __UpperCamelCase ): if isinstance(__UpperCamelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(__UpperCamelCase , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(__UpperCamelCase ): return [[videos]] raise ValueError(F'''Could not make batched video from {videos}''' ) class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCamelCase__ = ['''pixel_values'''] def __init__( self : Dict , __magic_name__ : bool = True , __magic_name__ : Dict[str, int] = None , __magic_name__ : PILImageResampling = PILImageResampling.BILINEAR , __magic_name__ : bool = True , __magic_name__ : Dict[str, int] = None , __magic_name__ : bool = True , __magic_name__ : Union[int, float] = 1 / 255 , __magic_name__ : bool = True , __magic_name__ : bool = True , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[float, List[float]]] = None , **__magic_name__ : List[Any] , ) -> None: super().__init__(**__magic_name__ ) SCREAMING_SNAKE_CASE_ = size if size is not None else {"shortest_edge": 256} SCREAMING_SNAKE_CASE_ = get_size_dict(__magic_name__ , default_to_square=__magic_name__ ) SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else {"height": 224, "width": 224} SCREAMING_SNAKE_CASE_ = get_size_dict(__magic_name__ , param_name="crop_size" ) SCREAMING_SNAKE_CASE_ = do_resize SCREAMING_SNAKE_CASE_ = size SCREAMING_SNAKE_CASE_ = do_center_crop SCREAMING_SNAKE_CASE_ = crop_size SCREAMING_SNAKE_CASE_ = resample SCREAMING_SNAKE_CASE_ = do_rescale SCREAMING_SNAKE_CASE_ = rescale_factor SCREAMING_SNAKE_CASE_ = offset SCREAMING_SNAKE_CASE_ = do_normalize SCREAMING_SNAKE_CASE_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN SCREAMING_SNAKE_CASE_ = image_std if image_std is not None else IMAGENET_STANDARD_STD def __A ( self : Tuple , __magic_name__ : np.ndarray , __magic_name__ : Dict[str, int] , __magic_name__ : PILImageResampling = PILImageResampling.BILINEAR , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : List[str] , ) -> np.ndarray: SCREAMING_SNAKE_CASE_ = get_size_dict(__magic_name__ , default_to_square=__magic_name__ ) if "shortest_edge" in size: SCREAMING_SNAKE_CASE_ = get_resize_output_image_size(__magic_name__ , size["shortest_edge"] , default_to_square=__magic_name__ ) elif "height" in size and "width" in size: SCREAMING_SNAKE_CASE_ = (size["height"], size["width"]) else: raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}''' ) return resize(__magic_name__ , size=__magic_name__ , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ ) def __A ( self : Tuple , __magic_name__ : np.ndarray , __magic_name__ : Dict[str, int] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Optional[int] , ) -> np.ndarray: SCREAMING_SNAKE_CASE_ = get_size_dict(__magic_name__ ) if "height" not in size or "width" not in size: raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' ) return center_crop(__magic_name__ , size=(size["height"], size["width"]) , data_format=__magic_name__ , **__magic_name__ ) def __A ( self : str , __magic_name__ : np.ndarray , __magic_name__ : Union[int, float] , __magic_name__ : bool = True , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Union[str, Any] , ) -> str: SCREAMING_SNAKE_CASE_ = image.astype(np.floataa ) if offset: SCREAMING_SNAKE_CASE_ = image - (scale / 2) return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ ) def __A ( self : List[str] , __magic_name__ : np.ndarray , __magic_name__ : Union[float, List[float]] , __magic_name__ : Union[float, List[float]] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Optional[int] , ) -> np.ndarray: return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ ) def __A ( self : List[str] , __magic_name__ : ImageInput , __magic_name__ : bool = None , __magic_name__ : Dict[str, int] = None , __magic_name__ : PILImageResampling = None , __magic_name__ : bool = None , __magic_name__ : Dict[str, int] = None , __magic_name__ : bool = None , __magic_name__ : float = None , __magic_name__ : bool = None , __magic_name__ : bool = None , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray: if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) if offset and not do_rescale: raise ValueError("For offset, do_rescale must also be set to True." ) # All transformations expect numpy arrays. 
SCREAMING_SNAKE_CASE_ = to_numpy_array(__magic_name__ ) if do_resize: SCREAMING_SNAKE_CASE_ = self.resize(image=__magic_name__ , size=__magic_name__ , resample=__magic_name__ ) if do_center_crop: SCREAMING_SNAKE_CASE_ = self.center_crop(__magic_name__ , size=__magic_name__ ) if do_rescale: SCREAMING_SNAKE_CASE_ = self.rescale(image=__magic_name__ , scale=__magic_name__ , offset=__magic_name__ ) if do_normalize: SCREAMING_SNAKE_CASE_ = self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ ) SCREAMING_SNAKE_CASE_ = to_channel_dimension_format(__magic_name__ , __magic_name__ ) return image def __A ( self : Dict , __magic_name__ : ImageInput , __magic_name__ : bool = None , __magic_name__ : Dict[str, int] = None , __magic_name__ : PILImageResampling = None , __magic_name__ : bool = None , __magic_name__ : Dict[str, int] = None , __magic_name__ : bool = None , __magic_name__ : float = None , __magic_name__ : bool = None , __magic_name__ : bool = None , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[str, TensorType]] = None , __magic_name__ : ChannelDimension = ChannelDimension.FIRST , **__magic_name__ : Tuple , ) -> PIL.Image.Image: SCREAMING_SNAKE_CASE_ = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE_ = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE_ = do_center_crop if do_center_crop is not None else self.do_center_crop SCREAMING_SNAKE_CASE_ = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE_ = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE_ = offset if offset is not None else self.offset SCREAMING_SNAKE_CASE_ = do_normalize if do_normalize is not None else self.do_normalize SCREAMING_SNAKE_CASE_ = image_mean if image_mean is not None else self.image_mean SCREAMING_SNAKE_CASE_ = image_std if image_std is not None else self.image_std SCREAMING_SNAKE_CASE_ = size if size is not None else self.size SCREAMING_SNAKE_CASE_ = get_size_dict(__magic_name__ , default_to_square=__magic_name__ ) SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else self.crop_size SCREAMING_SNAKE_CASE_ = get_size_dict(__magic_name__ , param_name="crop_size" ) if not valid_images(__magic_name__ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) SCREAMING_SNAKE_CASE_ = make_batched(__magic_name__ ) SCREAMING_SNAKE_CASE_ = [ [ self._preprocess_image( image=__magic_name__ , do_resize=__magic_name__ , size=__magic_name__ , resample=__magic_name__ , do_center_crop=__magic_name__ , crop_size=__magic_name__ , do_rescale=__magic_name__ , rescale_factor=__magic_name__ , offset=__magic_name__ , do_normalize=__magic_name__ , image_mean=__magic_name__ , image_std=__magic_name__ , data_format=__magic_name__ , ) for img in video ] for video in videos ] SCREAMING_SNAKE_CASE_ = {"pixel_values": videos} return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
366
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input A : Union[str, Any] = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine" def a__ ( ): SCREAMING_SNAKE_CASE_ = _ask_options( "In which compute environment are you running?" , ["This machine", "AWS (Amazon SageMaker)"] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: SCREAMING_SNAKE_CASE_ = get_sagemaker_input() else: SCREAMING_SNAKE_CASE_ = get_cluster_input() return config def a__ ( __UpperCamelCase=None ): if subparsers is not None: SCREAMING_SNAKE_CASE_ = subparsers.add_parser("config" , description=__UpperCamelCase ) else: SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser("Accelerate config command" , description=__UpperCamelCase ) parser.add_argument( "--config_file" , default=__UpperCamelCase , help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." ) , ) if subparsers is not None: parser.set_defaults(func=__UpperCamelCase ) return parser def a__ ( __UpperCamelCase ): SCREAMING_SNAKE_CASE_ = get_user_input() if args.config_file is not None: SCREAMING_SNAKE_CASE_ = args.config_file else: if not os.path.isdir(__UpperCamelCase ): os.makedirs(__UpperCamelCase ) SCREAMING_SNAKE_CASE_ = default_yaml_config_file if config_file.endswith(".json" ): config.to_json_file(__UpperCamelCase ) else: config.to_yaml_file(__UpperCamelCase ) print(F'''accelerate configuration saved at {config_file}''' ) def a__ ( ): SCREAMING_SNAKE_CASE_ = config_command_parser() SCREAMING_SNAKE_CASE_ = parser.parse_args() config_command(__UpperCamelCase ) if __name__ == "__main__": main()
305
0
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf A : List[Any] = logging.get_logger(__name__) @dataclass class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCamelCase__ = [ '''no_inference''', '''no_cuda''', '''no_tpu''', '''no_speed''', '''no_memory''', '''no_env_print''', '''no_multi_process''', ] def __init__( self : Optional[Any] , **__magic_name__ : Dict ) -> Union[str, Any]: for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: SCREAMING_SNAKE_CASE_ = deprecated_arg[3:] SCREAMING_SNAKE_CASE_ = not kwargs.pop(__magic_name__ ) logger.warning( F'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or''' F''' {positive_arg}={kwargs[positive_arg]}''' ) SCREAMING_SNAKE_CASE_ = kwargs.pop("tpu_name" , self.tpu_name ) SCREAMING_SNAKE_CASE_ = kwargs.pop("device_idx" , self.device_idx ) SCREAMING_SNAKE_CASE_ = kwargs.pop("eager_mode" , self.eager_mode ) SCREAMING_SNAKE_CASE_ = kwargs.pop("use_xla" , self.use_xla ) super().__init__(**__magic_name__ ) lowerCamelCase__ = field( default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Name of TPU'''} , ) lowerCamelCase__ = field( default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , ) lowerCamelCase__ = field(default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Benchmark models in eager model.'''} ) lowerCamelCase__ = field( default=SCREAMING_SNAKE_CASE__ , metadata={ '''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.''' } , ) @cached_property def __A ( self : Dict ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ["tf"] ) SCREAMING_SNAKE_CASE_ = None if self.tpu: try: if self.tpu_name: SCREAMING_SNAKE_CASE_ = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: SCREAMING_SNAKE_CASE_ = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: SCREAMING_SNAKE_CASE_ = None return tpu @cached_property def __A ( self : Any ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ["tf"] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) SCREAMING_SNAKE_CASE_ = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" ) SCREAMING_SNAKE_CASE_ = tf.distribute.OneDeviceStrategy(device=F'''/gpu:{self.device_idx}''' ) else: tf.config.set_visible_devices([] , "GPU" ) # disable GPU SCREAMING_SNAKE_CASE_ = tf.distribute.OneDeviceStrategy(device=F'''/cpu:{self.device_idx}''' ) return strategy @property def __A ( self : int ) -> bool: requires_backends(self , ["tf"] ) return self._setup_tpu is not None @property def __A ( self : Any ) -> "tf.distribute.Strategy": requires_backends(self , ["tf"] ) return self._setup_strategy @property def __A ( self : Optional[Any] ) -> str: requires_backends(self , ["tf"] ) return tf.config.list_physical_devices("GPU" ) @property def __A ( self : Optional[int] ) -> int: requires_backends(self , ["tf"] ) if self.cuda: return len(self.gpu_list ) return 0 @property def __A ( self : List[str] ) -> bool: return self.n_gpu > 0
367
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is kept in the asdict output even when it has its default value.
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
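The `column_mapping` property lets a dataset with custom column names be remapped onto the canonical text/summary schema, for example:

template = Summarization(text_column="article", summary_column="highlights")
print(template.column_mapping)  # {'article': 'text', 'highlights': 'summary'}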
305
0
import argparse

from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
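A typical invocation of the conversion script (the script filename is assumed from the stock transformers layout; all paths are placeholders):

# python convert_t5_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./t5/model.ckpt \
#     --config_file ./t5/config.json \
#     --pytorch_dump_path ./t5-pytorch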
368
from ....utils import logging


logger = logging.get_logger(__name__)


class MMBTConfig:
    """Configuration helper that wraps a text-model config and adds the hidden
    size of the other modality's encoder (2048 by default)."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2_048):
        # Adopt every attribute of the wrapped config, then record modal settings.
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
305
0
def binomial_coefficient(n: int, r: int) -> int:
    # Build Pascal's triangle row by row, keeping only the first r + 1 entries.
    c = [0 for _ in range(r + 1)]
    c[0] = 1  # nC0 = 1
    for i in range(1, n + 1):
        # Compute the current row from the previous row, right to left.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
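As a sanity check, C(10, 5) = 252, and the coefficient is symmetric in r:

assert binomial_coefficient(10, 5) == 252
assert binomial_coefficient(10, 5) == binomial_coefficient(10, 10 - 5)  # symmetry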
369
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCamelCase__ = ['''image_processor''', '''tokenizer'''] lowerCamelCase__ = '''ViltImageProcessor''' lowerCamelCase__ = ('''BertTokenizer''', '''BertTokenizerFast''') def __init__( self : Optional[int] , __magic_name__ : str=None , __magic_name__ : List[str]=None , **__magic_name__ : Any ) -> str: SCREAMING_SNAKE_CASE_ = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __magic_name__ , ) SCREAMING_SNAKE_CASE_ = kwargs.pop("feature_extractor" ) SCREAMING_SNAKE_CASE_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(__magic_name__ , __magic_name__ ) SCREAMING_SNAKE_CASE_ = self.image_processor def __call__( self : List[str] , __magic_name__ : List[str] , __magic_name__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __magic_name__ : bool = True , __magic_name__ : Union[bool, str, PaddingStrategy] = False , __magic_name__ : Union[bool, str, TruncationStrategy] = None , __magic_name__ : Optional[int] = None , __magic_name__ : int = 0 , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = True , __magic_name__ : Optional[Union[str, TensorType]] = None , **__magic_name__ : str , ) -> BatchEncoding: SCREAMING_SNAKE_CASE_ = self.tokenizer( text=__magic_name__ , add_special_tokens=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , max_length=__magic_name__ , stride=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_token_type_ids=__magic_name__ , return_attention_mask=__magic_name__ , return_overflowing_tokens=__magic_name__ , return_special_tokens_mask=__magic_name__ , return_offsets_mapping=__magic_name__ , return_length=__magic_name__ , verbose=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , ) # add pixel_values + pixel_mask SCREAMING_SNAKE_CASE_ = self.image_processor(__magic_name__ , return_tensors=__magic_name__ ) encoding.update(__magic_name__ ) return encoding def __A ( self : Optional[int] , *__magic_name__ : List[Any] , **__magic_name__ : Optional[Any] ) -> Any: return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ ) def __A ( self : Dict , *__magic_name__ : List[Any] , **__magic_name__ : Union[str, Any] ) -> str: return self.tokenizer.decode(*__magic_name__ , **__magic_name__ ) @property def __A ( self : Optional[int] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE_ = self.tokenizer.model_input_names SCREAMING_SNAKE_CASE_ = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def __A ( self : Dict ) -> List[Any]: warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." 
, __magic_name__ , ) return self.image_processor_class @property def __A ( self : int ) -> List[Any]: warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __magic_name__ , ) return self.image_processor
305
0
import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : Optional[int] = logging.get_logger(__name__) A : str = { "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json", "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json", "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json", } class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCamelCase__ = '''owlvit_text_model''' def __init__( self : Tuple , __magic_name__ : List[Any]=49_408 , __magic_name__ : str=512 , __magic_name__ : Dict=2_048 , __magic_name__ : Dict=12 , __magic_name__ : Tuple=8 , __magic_name__ : Optional[Any]=16 , __magic_name__ : Optional[Any]="quick_gelu" , __magic_name__ : Optional[Any]=1e-5 , __magic_name__ : Union[str, Any]=0.0 , __magic_name__ : str=0.02 , __magic_name__ : List[str]=1.0 , __magic_name__ : Optional[int]=0 , __magic_name__ : Tuple=49_406 , __magic_name__ : Optional[Any]=49_407 , **__magic_name__ : List[str] , ) -> Tuple: super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ ) SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = layer_norm_eps SCREAMING_SNAKE_CASE_ = attention_dropout SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = initializer_factor @classmethod def __A ( cls : List[str] , __magic_name__ : Union[str, os.PathLike] , **__magic_name__ : str ) -> "PretrainedConfig": cls._set_token_in_kwargs(__magic_name__ ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = cls.get_config_dict(__magic_name__ , **__magic_name__ ) # get the text config dict if we are loading from OwlViTConfig if config_dict.get("model_type" ) == "owlvit": SCREAMING_SNAKE_CASE_ = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__magic_name__ , **__magic_name__ ) class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCamelCase__ = '''owlvit_vision_model''' def __init__( self : List[Any] , __magic_name__ : int=768 , __magic_name__ : Dict=3_072 , __magic_name__ : Any=12 , __magic_name__ : Optional[Any]=12 , __magic_name__ : List[Any]=3 , __magic_name__ : List[Any]=768 , __magic_name__ : Any=32 , __magic_name__ : Tuple="quick_gelu" , __magic_name__ : List[Any]=1e-5 , __magic_name__ : Tuple=0.0 , __magic_name__ : Any=0.02 , __magic_name__ : Union[str, Any]=1.0 , **__magic_name__ : Optional[int] , ) -> Tuple: super().__init__(**__magic_name__ ) SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = num_channels SCREAMING_SNAKE_CASE_ = image_size SCREAMING_SNAKE_CASE_ = patch_size SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = layer_norm_eps SCREAMING_SNAKE_CASE_ = attention_dropout SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = initializer_factor @classmethod def __A ( cls : Union[str, Any] , __magic_name__ : Union[str, os.PathLike] , **__magic_name__ : Tuple ) -> "PretrainedConfig": cls._set_token_in_kwargs(__magic_name__ ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = cls.get_config_dict(__magic_name__ , **__magic_name__ ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get("model_type" ) == "owlvit": SCREAMING_SNAKE_CASE_ = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__magic_name__ , **__magic_name__ ) class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCamelCase__ = '''owlvit''' lowerCamelCase__ = True def __init__( self : List[Any] , __magic_name__ : Any=None , __magic_name__ : str=None , __magic_name__ : Optional[int]=512 , __magic_name__ : Any=2.6592 , __magic_name__ : Optional[int]=True , **__magic_name__ : Union[str, Any] , ) -> Any: super().__init__(**__magic_name__ ) if text_config is None: SCREAMING_SNAKE_CASE_ = {} logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." ) if vision_config is None: SCREAMING_SNAKE_CASE_ = {} logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." 
) SCREAMING_SNAKE_CASE_ = OwlViTTextConfig(**__magic_name__ ) SCREAMING_SNAKE_CASE_ = OwlViTVisionConfig(**__magic_name__ ) SCREAMING_SNAKE_CASE_ = projection_dim SCREAMING_SNAKE_CASE_ = logit_scale_init_value SCREAMING_SNAKE_CASE_ = return_dict SCREAMING_SNAKE_CASE_ = 1.0 @classmethod def __A ( cls : Optional[int] , __magic_name__ : Union[str, os.PathLike] , **__magic_name__ : Union[str, Any] ) -> "PretrainedConfig": cls._set_token_in_kwargs(__magic_name__ ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = cls.get_config_dict(__magic_name__ , **__magic_name__ ) if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__magic_name__ , **__magic_name__ ) @classmethod def __A ( cls : str , __magic_name__ : Dict , __magic_name__ : Dict , **__magic_name__ : Dict ) -> Union[str, Any]: SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = text_config SCREAMING_SNAKE_CASE_ = vision_config return cls.from_dict(__magic_name__ , **__magic_name__ ) def __A ( self : str ) -> Any: SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE_ = self.text_config.to_dict() SCREAMING_SNAKE_CASE_ = self.vision_config.to_dict() SCREAMING_SNAKE_CASE_ = self.__class__.model_type return output class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" @property def __A ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("attention_mask", {0: "batch", 1: "sequence"}), ] ) @property def __A ( self : int ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("logits_per_image", {0: "batch"}), ("logits_per_text", {0: "batch"}), ("text_embeds", {0: "batch"}), ("image_embeds", {0: "batch"}), ] ) @property def __A ( self : Dict ) -> float: return 1e-4 def __A ( self : Any , __magic_name__ : "ProcessorMixin" , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : Optional["TensorType"] = None , ) -> Mapping[str, Any]: SCREAMING_SNAKE_CASE_ = super().generate_dummy_inputs( processor.tokenizer , batch_size=__magic_name__ , seq_length=__magic_name__ , framework=__magic_name__ ) SCREAMING_SNAKE_CASE_ = super().generate_dummy_inputs( processor.image_processor , batch_size=__magic_name__ , framework=__magic_name__ ) return {**text_input_dict, **image_input_dict} @property def __A ( self : Union[str, Any] ) -> int: return 14
370
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING A : str = logging.get_logger(__name__) A : Optional[int] = { "microsoft/table-transformer-detection": ( "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json" ), } class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCamelCase__ = '''table-transformer''' lowerCamelCase__ = ['''past_key_values'''] lowerCamelCase__ = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self : List[Any] , __magic_name__ : Optional[Any]=True , __magic_name__ : Dict=None , __magic_name__ : Any=3 , __magic_name__ : List[str]=100 , __magic_name__ : Union[str, Any]=6 , __magic_name__ : Dict=2_048 , __magic_name__ : str=8 , __magic_name__ : int=6 , __magic_name__ : List[Any]=2_048 , __magic_name__ : Optional[int]=8 , __magic_name__ : Optional[int]=0.0 , __magic_name__ : List[Any]=0.0 , __magic_name__ : Optional[Any]=True , __magic_name__ : List[Any]="relu" , __magic_name__ : List[str]=256 , __magic_name__ : List[str]=0.1 , __magic_name__ : int=0.0 , __magic_name__ : Optional[Any]=0.0 , __magic_name__ : Tuple=0.02 , __magic_name__ : str=1.0 , __magic_name__ : int=False , __magic_name__ : Dict="sine" , __magic_name__ : Union[str, Any]="resnet50" , __magic_name__ : Optional[Any]=True , __magic_name__ : str=False , __magic_name__ : List[str]=1 , __magic_name__ : int=5 , __magic_name__ : Union[str, Any]=2 , __magic_name__ : Tuple=1 , __magic_name__ : Optional[int]=1 , __magic_name__ : Optional[Any]=5 , __magic_name__ : Optional[int]=2 , __magic_name__ : Union[str, Any]=0.1 , **__magic_name__ : Tuple , ) -> str: if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." 
) SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(__magic_name__ , __magic_name__ ): SCREAMING_SNAKE_CASE_ = backbone_config.get("model_type" ) SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING[backbone_model_type] SCREAMING_SNAKE_CASE_ = config_class.from_dict(__magic_name__ ) # set timm attributes to None SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None, None, None SCREAMING_SNAKE_CASE_ = use_timm_backbone SCREAMING_SNAKE_CASE_ = backbone_config SCREAMING_SNAKE_CASE_ = num_channels SCREAMING_SNAKE_CASE_ = num_queries SCREAMING_SNAKE_CASE_ = d_model SCREAMING_SNAKE_CASE_ = encoder_ffn_dim SCREAMING_SNAKE_CASE_ = encoder_layers SCREAMING_SNAKE_CASE_ = encoder_attention_heads SCREAMING_SNAKE_CASE_ = decoder_ffn_dim SCREAMING_SNAKE_CASE_ = decoder_layers SCREAMING_SNAKE_CASE_ = decoder_attention_heads SCREAMING_SNAKE_CASE_ = dropout SCREAMING_SNAKE_CASE_ = attention_dropout SCREAMING_SNAKE_CASE_ = activation_dropout SCREAMING_SNAKE_CASE_ = activation_function SCREAMING_SNAKE_CASE_ = init_std SCREAMING_SNAKE_CASE_ = init_xavier_std SCREAMING_SNAKE_CASE_ = encoder_layerdrop SCREAMING_SNAKE_CASE_ = decoder_layerdrop SCREAMING_SNAKE_CASE_ = encoder_layers SCREAMING_SNAKE_CASE_ = auxiliary_loss SCREAMING_SNAKE_CASE_ = position_embedding_type SCREAMING_SNAKE_CASE_ = backbone SCREAMING_SNAKE_CASE_ = use_pretrained_backbone SCREAMING_SNAKE_CASE_ = dilation # Hungarian matcher SCREAMING_SNAKE_CASE_ = class_cost SCREAMING_SNAKE_CASE_ = bbox_cost SCREAMING_SNAKE_CASE_ = giou_cost # Loss coefficients SCREAMING_SNAKE_CASE_ = mask_loss_coefficient SCREAMING_SNAKE_CASE_ = dice_loss_coefficient SCREAMING_SNAKE_CASE_ = bbox_loss_coefficient SCREAMING_SNAKE_CASE_ = giou_loss_coefficient SCREAMING_SNAKE_CASE_ = eos_coefficient super().__init__(is_encoder_decoder=__magic_name__ , **__magic_name__ ) @property def __A ( self : Union[str, Any] ) -> int: return self.encoder_attention_heads @property def __A ( self : Any ) -> int: return self.d_model class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCamelCase__ = version.parse('''1.11''' ) @property def __A ( self : Tuple ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def __A ( self : Any ) -> float: return 1e-5 @property def __A ( self : int ) -> int: return 12
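# Usage sketch (an illustrative assumption, not part of this module): the config class
# above declares model_type "table-transformer", so it can be loaded through the Auto API
# from the checkpoint named in the archive map. With this module's defaults d_model is
# 256 and num_queries is 100; the hub checkpoint may override them.
#
#     from transformers import AutoConfig
#     config = AutoConfig.from_pretrained("microsoft/table-transformer-detection")
#     print(config.d_model, config.num_queries)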
import inspect import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase : """simple docstring""" def __init__( self : str , __magic_name__ : Dict , __magic_name__ : str=13 , __magic_name__ : Dict=[30, 30] , __magic_name__ : Any=2 , __magic_name__ : Optional[int]=3 , __magic_name__ : Dict=True , __magic_name__ : Tuple=True , __magic_name__ : Union[str, Any]=32 , __magic_name__ : Optional[Any]=5 , __magic_name__ : List[str]=4 , __magic_name__ : Any=37 , __magic_name__ : Union[str, Any]="gelu" , __magic_name__ : Tuple=0.1 , __magic_name__ : Tuple=0.1 , __magic_name__ : Optional[Any]=10 , __magic_name__ : str=0.02 , __magic_name__ : int=3 , __magic_name__ : Union[str, Any]=None , __magic_name__ : Optional[int]=8 , __magic_name__ : int=10 , ) -> Any: SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = image_size SCREAMING_SNAKE_CASE_ = patch_size SCREAMING_SNAKE_CASE_ = num_channels SCREAMING_SNAKE_CASE_ = is_training SCREAMING_SNAKE_CASE_ = use_labels SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = hidden_dropout_prob SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = type_sequence_label_size SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = num_labels SCREAMING_SNAKE_CASE_ = scope SCREAMING_SNAKE_CASE_ = n_targets SCREAMING_SNAKE_CASE_ = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens SCREAMING_SNAKE_CASE_ = (image_size[1] // patch_size) * (image_size[0] // patch_size) SCREAMING_SNAKE_CASE_ = num_patches + 1 + self.num_detection_tokens def __A ( self : Dict ) -> Union[str, Any]: SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] ) SCREAMING_SNAKE_CASE_ = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) SCREAMING_SNAKE_CASE_ = [] for i in range(self.batch_size ): SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = torch.randint( high=self.num_labels , size=(self.n_targets,) , device=__magic_name__ ) SCREAMING_SNAKE_CASE_ = torch.rand(self.n_targets , 4 , device=__magic_name__ ) labels.append(__magic_name__ ) SCREAMING_SNAKE_CASE_ = self.get_config() return config, pixel_values, labels def __A ( self : List[Any] ) -> List[Any]: return YolosConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , 
hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__magic_name__ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , ) def __A ( self : Dict , __magic_name__ : Any , __magic_name__ : Dict , __magic_name__ : List[Any] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE_ = YolosModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() SCREAMING_SNAKE_CASE_ = model(__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) ) def __A ( self : Dict , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : Any ) -> Dict: SCREAMING_SNAKE_CASE_ = YolosForObjectDetection(__magic_name__ ) model.to(__magic_name__ ) model.eval() SCREAMING_SNAKE_CASE_ = model(pixel_values=__magic_name__ ) SCREAMING_SNAKE_CASE_ = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) SCREAMING_SNAKE_CASE_ = model(pixel_values=__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) def __A ( self : List[str] ) -> Tuple: SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs SCREAMING_SNAKE_CASE_ = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" lowerCamelCase__ = (YolosModel, YolosForObjectDetection) if is_torch_available() else () lowerCamelCase__ = ( {'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False def __A ( self : int , __magic_name__ : List[str] , __magic_name__ : List[Any] , __magic_name__ : str=False ) -> int: SCREAMING_SNAKE_CASE_ = super()._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ ) if return_labels: if model_class.__name__ == "YolosForObjectDetection": SCREAMING_SNAKE_CASE_ = [] for i in range(self.model_tester.batch_size ): SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = torch.ones( size=(self.model_tester.n_targets,) , device=__magic_name__ , dtype=torch.long ) SCREAMING_SNAKE_CASE_ = torch.ones( self.model_tester.n_targets , 4 , device=__magic_name__ , dtype=torch.float ) labels.append(__magic_name__ ) SCREAMING_SNAKE_CASE_ = labels return inputs_dict def __A ( self : Optional[Any] ) -> List[str]: SCREAMING_SNAKE_CASE_ = YolosModelTester(self ) SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 ) def __A ( self : Dict ) -> str: self.config_tester.run_common_tests() def __A ( self : str ) -> Tuple: # YOLOS does not use inputs_embeds pass def __A ( self : List[str] ) -> Dict: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes: SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) SCREAMING_SNAKE_CASE_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) ) def __A ( self : Optional[int] ) -> List[Any]: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ ) SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , __magic_name__ ) def __A ( self : Tuple ) -> Any: SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def __A ( self : Optional[Any] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ = True # in YOLOS, the seq_len is different SCREAMING_SNAKE_CASE_ = self.model_tester.expected_seq_len for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) SCREAMING_SNAKE_CASE_ = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) SCREAMING_SNAKE_CASE_ = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) SCREAMING_SNAKE_CASE_ = len(__magic_name__ ) # Check attention is always last and order is fine SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) SCREAMING_SNAKE_CASE_ = 1 self.assertEqual(out_len + added_hidden_states , len(__magic_name__ ) ) SCREAMING_SNAKE_CASE_ = outputs.attentions self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def __A ( self : Dict ) -> str: def check_hidden_states_output(__magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : Optional[int] ): SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) SCREAMING_SNAKE_CASE_ = outputs.hidden_states SCREAMING_SNAKE_CASE_ = getattr( self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(__magic_name__ ) , 
__magic_name__ ) # YOLOS has a different seq_length SCREAMING_SNAKE_CASE_ = self.model_tester.expected_seq_len self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE_ = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) def __A ( self : Optional[int] ) -> str: SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*__magic_name__ ) @slow def __A ( self : Any ) -> Optional[Any]: for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ = YolosModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def a__ ( ): SCREAMING_SNAKE_CASE_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCamelCase (unittest.TestCase ): """simple docstring""" @cached_property def __A ( self : Dict ) -> Optional[Any]: return AutoImageProcessor.from_pretrained("hustvl/yolos-small" ) if is_vision_available() else None @slow def __A ( self : Any ) -> List[str]: SCREAMING_SNAKE_CASE_ = YolosForObjectDetection.from_pretrained("hustvl/yolos-small" ).to(__magic_name__ ) SCREAMING_SNAKE_CASE_ = self.default_image_processor SCREAMING_SNAKE_CASE_ = prepare_img() SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(inputs.pixel_values ) # verify outputs SCREAMING_SNAKE_CASE_ = torch.Size((1, 100, 92) ) self.assertEqual(outputs.logits.shape , __magic_name__ ) SCREAMING_SNAKE_CASE_ = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=__magic_name__ , ) SCREAMING_SNAKE_CASE_ = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=__magic_name__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __magic_name__ , atol=1e-4 ) ) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , __magic_name__ , atol=1e-4 ) ) # verify postprocessing SCREAMING_SNAKE_CASE_ = image_processor.post_process_object_detection( __magic_name__ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0] SCREAMING_SNAKE_CASE_ = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(__magic_name__ ) SCREAMING_SNAKE_CASE_ = [75, 75, 17, 63, 17] SCREAMING_SNAKE_CASE_ = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(__magic_name__ ) self.assertEqual(len(results["scores"] ) , 5 ) self.assertTrue(torch.allclose(results["scores"] , __magic_name__ , atol=1e-4 ) ) self.assertSequenceEqual(results["labels"].tolist() , __magic_name__ ) self.assertTrue(torch.allclose(results["boxes"][0, :] , __magic_name__ ) )
import warnings from diffusers import StableDiffusionImgaImgPipeline # noqa F401 warnings.warn( "The `image_to_image.py` script is outdated. Please use `from diffusers import" " StableDiffusionImg2ImgPipeline` directly instead." )
"""simple docstring""" import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() A : int = logging.get_logger("transformers.models.speecht5") A : Any = { "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm", "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection", "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv", "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed", } A : List[str] = { "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens", "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha", } A : Dict = { "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0", "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1", "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer", "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha", "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer", } A : int = { "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out", "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out", "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv", "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm", "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv", "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm", "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv", "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm", "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv", "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm", "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv", "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm", } A : Tuple = { "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens", } A : Optional[Any] = { "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head", } A : Optional[Any] = { "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj", "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj", "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj", "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj", "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm", "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense", "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense", "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm", "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm", 
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k", } A : List[Any] = { "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj", "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj", "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj", "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj", "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm", "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj", "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj", "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj", "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj", "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm", "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense", "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense", "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm", } A : Optional[Any] = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } A : Dict = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } A : str = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } A : str = [] A : Any = [ "encoder.version", "encoder.layers.*.norm_k.weight", "encoder.layers.*.norm_k.bias", "decoder.version", "decoder.layers.*.norm_k.weight", "decoder.layers.*.norm_k.bias", "decoder.pos_emb.pe_k", "speech_encoder_prenet.embed_positions._float_tensor", "text_decoder_prenet.embed_positions._float_tensor", ] A : str = IGNORE_KEYS + [ "encoder.proj", "text_encoder_prenet.*", "speech_decoder_prenet.*", "speech_decoder_postnet.*", ] A : int = IGNORE_KEYS + [ "encoder.proj", "speech_encoder_prenet.*", "text_decoder_prenet.*", "text_decoder_postnet.*", ] A : List[str] = IGNORE_KEYS + [ "encoder.proj", "text_encoder_prenet.*", "text_decoder_prenet.*", "text_decoder_postnet.*", ] def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): for attribute in key.split("." ): SCREAMING_SNAKE_CASE_ = getattr(__UpperCamelCase , __UpperCamelCase ) if weight_type is not None: SCREAMING_SNAKE_CASE_ = getattr(__UpperCamelCase , __UpperCamelCase ).shape else: SCREAMING_SNAKE_CASE_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + "." 
+ weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": SCREAMING_SNAKE_CASE_ = value elif weight_type == "weight_g": SCREAMING_SNAKE_CASE_ = value elif weight_type == "weight_v": SCREAMING_SNAKE_CASE_ = value elif weight_type == "bias": SCREAMING_SNAKE_CASE_ = value elif weight_type == "running_mean": SCREAMING_SNAKE_CASE_ = value elif weight_type == "running_var": SCREAMING_SNAKE_CASE_ = value elif weight_type == "num_batches_tracked": SCREAMING_SNAKE_CASE_ = value else: SCREAMING_SNAKE_CASE_ = value logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' ) def a__ ( __UpperCamelCase , __UpperCamelCase ): for key in ignore_keys: if key.endswith(".*" ): if name.startswith(key[:-1] ): return True elif ".*." in key: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = key.split(".*." ) if prefix in name and suffix in name: return True elif key in name: return True return False def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): SCREAMING_SNAKE_CASE_ = [] if task == "s2t": SCREAMING_SNAKE_CASE_ = hf_model.speechta.encoder.prenet.feature_encoder SCREAMING_SNAKE_CASE_ = MAPPING_S2T SCREAMING_SNAKE_CASE_ = IGNORE_KEYS_S2T elif task == "t2s": SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = MAPPING_T2S SCREAMING_SNAKE_CASE_ = IGNORE_KEYS_T2S elif task == "s2s": SCREAMING_SNAKE_CASE_ = hf_model.speechta.encoder.prenet.feature_encoder SCREAMING_SNAKE_CASE_ = MAPPING_S2S SCREAMING_SNAKE_CASE_ = IGNORE_KEYS_S2S else: raise ValueError(F'''Unsupported task: {task}''' ) for name, value in fairseq_dict.items(): if should_ignore(__UpperCamelCase , __UpperCamelCase ): logger.info(F'''{name} was ignored''' ) continue SCREAMING_SNAKE_CASE_ = False if "conv_layers" in name: load_conv_layer( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , hf_model.config.feat_extract_norm == "group" , ) SCREAMING_SNAKE_CASE_ = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = key.split(".*." ) if prefix in name and suffix in name: SCREAMING_SNAKE_CASE_ = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: SCREAMING_SNAKE_CASE_ = True if "*" in mapped_key: SCREAMING_SNAKE_CASE_ = name.split(__UpperCamelCase )[0].split("." )[-2] SCREAMING_SNAKE_CASE_ = mapped_key.replace("*" , __UpperCamelCase ) if "weight_g" in name: SCREAMING_SNAKE_CASE_ = "weight_g" elif "weight_v" in name: SCREAMING_SNAKE_CASE_ = "weight_v" elif "bias" in name: SCREAMING_SNAKE_CASE_ = "bias" elif "weight" in name: SCREAMING_SNAKE_CASE_ = "weight" elif "running_mean" in name: SCREAMING_SNAKE_CASE_ = "running_mean" elif "running_var" in name: SCREAMING_SNAKE_CASE_ = "running_var" elif "num_batches_tracked" in name: SCREAMING_SNAKE_CASE_ = "num_batches_tracked" else: SCREAMING_SNAKE_CASE_ = None set_recursively(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) continue if not is_used: unused_weights.append(__UpperCamelCase ) logger.warning(F'''Unused weights: {unused_weights}''' ) def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): SCREAMING_SNAKE_CASE_ = full_name.split("conv_layers." )[-1] SCREAMING_SNAKE_CASE_ = name.split("." 
) SCREAMING_SNAKE_CASE_ = int(items[0] ) SCREAMING_SNAKE_CASE_ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) SCREAMING_SNAKE_CASE_ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) SCREAMING_SNAKE_CASE_ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) SCREAMING_SNAKE_CASE_ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) SCREAMING_SNAKE_CASE_ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(__UpperCamelCase ) @torch.no_grad() def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , ): if config_path is not None: SCREAMING_SNAKE_CASE_ = SpeechTaConfig.from_pretrained(__UpperCamelCase ) else: SCREAMING_SNAKE_CASE_ = SpeechTaConfig() if task == "s2t": SCREAMING_SNAKE_CASE_ = config.max_text_positions SCREAMING_SNAKE_CASE_ = SpeechTaForSpeechToText(__UpperCamelCase ) elif task == "t2s": SCREAMING_SNAKE_CASE_ = 1_8_7_6 SCREAMING_SNAKE_CASE_ = 6_0_0 SCREAMING_SNAKE_CASE_ = config.max_speech_positions SCREAMING_SNAKE_CASE_ = SpeechTaForTextToSpeech(__UpperCamelCase ) elif task == "s2s": SCREAMING_SNAKE_CASE_ = 1_8_7_6 SCREAMING_SNAKE_CASE_ = config.max_speech_positions SCREAMING_SNAKE_CASE_ = SpeechTaForSpeechToSpeech(__UpperCamelCase ) else: raise ValueError(F'''Unknown task name: {task}''' ) if vocab_path: SCREAMING_SNAKE_CASE_ = SpeechTaTokenizer(__UpperCamelCase , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it SCREAMING_SNAKE_CASE_ = AddedToken("<mask>" , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) SCREAMING_SNAKE_CASE_ = mask_token tokenizer.add_special_tokens({"mask_token": mask_token} ) tokenizer.add_tokens(["<ctc_blank>"] ) SCREAMING_SNAKE_CASE_ = SpeechTaFeatureExtractor() SCREAMING_SNAKE_CASE_ = SpeechTaProcessor(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase ) processor.save_pretrained(__UpperCamelCase ) SCREAMING_SNAKE_CASE_ = torch.load(__UpperCamelCase ) recursively_load_weights(fairseq_checkpoint["model"] , __UpperCamelCase , __UpperCamelCase ) model.save_pretrained(__UpperCamelCase ) if repo_id: print("Pushing to the hub..." 
) processor.push_to_hub(__UpperCamelCase ) model.push_to_hub(__UpperCamelCase ) if __name__ == "__main__": A : Optional[Any] = argparse.ArgumentParser() parser.add_argument( "--task", default="s2t", type=str, help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.", ) parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) A : int = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
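# Example invocation of the converter above (a sketch: the script filename and all file
# paths below are hypothetical placeholders, not artifacts shipped with this script):
#
#     python convert_speecht5_checkpoint.py \
#         --task t2s \
#         --checkpoint_path ./speecht5_tts_fairseq.pt \
#         --vocab_path ./spm_char.model \
#         --pytorch_dump_folder_path ./speecht5_tts_converted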
from __future__ import annotations import numpy as np def a__ ( __UpperCamelCase ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = np.shape(__UpperCamelCase ) if rows != columns: SCREAMING_SNAKE_CASE_ = ( "'table' has to be a square-shaped array but got a " F'''{rows}x{columns} array:\n{table}''' ) raise ValueError(__UpperCamelCase ) SCREAMING_SNAKE_CASE_ = np.zeros((rows, columns) ) SCREAMING_SNAKE_CASE_ = np.zeros((rows, columns) ) for i in range(__UpperCamelCase ): for j in range(__UpperCamelCase ): SCREAMING_SNAKE_CASE_ = sum(lower[i][k] * upper[k][j] for k in range(__UpperCamelCase ) ) if upper[j][j] == 0: raise ArithmeticError("No LU decomposition exists" ) SCREAMING_SNAKE_CASE_ = (table[i][j] - total) / upper[j][j] SCREAMING_SNAKE_CASE_ = 1 for j in range(__UpperCamelCase , __UpperCamelCase ): SCREAMING_SNAKE_CASE_ = sum(lower[i][k] * upper[k][j] for k in range(__UpperCamelCase ) ) SCREAMING_SNAKE_CASE_ = table[i][j] - total return lower, upper if __name__ == "__main__": import doctest doctest.testmod()
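# Usage sketch for the Doolittle LU decomposition above (exposed here as `a__`); the
# 3x3 matrix is illustrative. Decompose and check that lower @ upper reconstructs it:
#
#     import numpy as np
#     matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
#     lower, upper = a__(matrix)
#     assert np.allclose(lower @ upper, matrix)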
"""simple docstring""" import argparse import OmegaConf import torch from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): SCREAMING_SNAKE_CASE_ = OmegaConf.load(__UpperCamelCase ) SCREAMING_SNAKE_CASE_ = torch.load(__UpperCamelCase , map_location="cpu" )["model"] SCREAMING_SNAKE_CASE_ = list(state_dict.keys() ) # extract state_dict for VQVAE SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = "first_stage_model." for key in keys: if key.startswith(__UpperCamelCase ): SCREAMING_SNAKE_CASE_ = state_dict[key] # extract state_dict for UNetLDM SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = "model.diffusion_model." for key in keys: if key.startswith(__UpperCamelCase ): SCREAMING_SNAKE_CASE_ = state_dict[key] SCREAMING_SNAKE_CASE_ = config.model.params.first_stage_config.params SCREAMING_SNAKE_CASE_ = config.model.params.unet_config.params SCREAMING_SNAKE_CASE_ = VQModel(**__UpperCamelCase ).eval() vqvae.load_state_dict(__UpperCamelCase ) SCREAMING_SNAKE_CASE_ = UNetLDMModel(**__UpperCamelCase ).eval() unet.load_state_dict(__UpperCamelCase ) SCREAMING_SNAKE_CASE_ = DDIMScheduler( timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=__UpperCamelCase , ) SCREAMING_SNAKE_CASE_ = LDMPipeline(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) pipeline.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": A : int = argparse.ArgumentParser() parser.add_argument("--checkpoint_path", type=str, required=True) parser.add_argument("--config_path", type=str, required=True) parser.add_argument("--output_path", type=str, required=True) A : int = parser.parse_args() convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
from math import pi, sqrt, tan def a__ ( __UpperCamelCase ): if side_length < 0: raise ValueError("surface_area_cube() only accepts non-negative values" ) return 6 * side_length**2 def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): if length < 0 or breadth < 0 or height < 0: raise ValueError("surface_area_cuboid() only accepts non-negative values" ) return 2 * ((length * breadth) + (breadth * height) + (length * height)) def a__ ( __UpperCamelCase ): if radius < 0: raise ValueError("surface_area_sphere() only accepts non-negative values" ) return 4 * pi * radius**2 def a__ ( __UpperCamelCase ): if radius < 0: raise ValueError("surface_area_hemisphere() only accepts non-negative values" ) return 3 * pi * radius**2 def a__ ( __UpperCamelCase , __UpperCamelCase ): if radius < 0 or height < 0: raise ValueError("surface_area_cone() only accepts non-negative values" ) return pi * radius * (radius + (height**2 + radius**2) ** 0.5) def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): if radius_a < 0 or radius_a < 0 or height < 0: raise ValueError( "surface_area_conical_frustum() only accepts non-negative values" ) SCREAMING_SNAKE_CASE_ = (height**2 + (radius_a - radius_a) ** 2) ** 0.5 return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2) def a__ ( __UpperCamelCase , __UpperCamelCase ): if radius < 0 or height < 0: raise ValueError("surface_area_cylinder() only accepts non-negative values" ) return 2 * pi * radius * (height + radius) def a__ ( __UpperCamelCase , __UpperCamelCase ): if torus_radius < 0 or tube_radius < 0: raise ValueError("surface_area_torus() only accepts non-negative values" ) if torus_radius < tube_radius: raise ValueError( "surface_area_torus() does not support spindle or self intersecting tori" ) return 4 * pow(__UpperCamelCase , 2 ) * torus_radius * tube_radius def a__ ( __UpperCamelCase , __UpperCamelCase ): if length < 0 or width < 0: raise ValueError("area_rectangle() only accepts non-negative values" ) return length * width def a__ ( __UpperCamelCase ): if side_length < 0: raise ValueError("area_square() only accepts non-negative values" ) return side_length**2 def a__ ( __UpperCamelCase , __UpperCamelCase ): if base < 0 or height < 0: raise ValueError("area_triangle() only accepts non-negative values" ) return (base * height) / 2 def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): if sidea < 0 or sidea < 0 or sidea < 0: raise ValueError("area_triangle_three_sides() only accepts non-negative values" ) elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea: raise ValueError("Given three sides do not form a triangle" ) SCREAMING_SNAKE_CASE_ = (sidea + sidea + sidea) / 2 SCREAMING_SNAKE_CASE_ = sqrt( semi_perimeter * (semi_perimeter - sidea) * (semi_perimeter - sidea) * (semi_perimeter - sidea) ) return area def a__ ( __UpperCamelCase , __UpperCamelCase ): if base < 0 or height < 0: raise ValueError("area_parallelogram() only accepts non-negative values" ) return base * height def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): if basea < 0 or basea < 0 or height < 0: raise ValueError("area_trapezium() only accepts non-negative values" ) return 1 / 2 * (basea + basea) * height def a__ ( __UpperCamelCase ): if radius < 0: raise ValueError("area_circle() only accepts non-negative values" ) return pi * radius**2 def a__ ( __UpperCamelCase , __UpperCamelCase ): if radius_x < 0 or radius_y < 0: raise ValueError("area_ellipse() only accepts non-negative values" ) return 
pi * radius_x * radius_y def a__ ( __UpperCamelCase , __UpperCamelCase ): if diagonal_a < 0 or diagonal_a < 0: raise ValueError("area_rhombus() only accepts non-negative values" ) return 1 / 2 * diagonal_a * diagonal_a def a__ ( __UpperCamelCase , __UpperCamelCase ): if not isinstance(__UpperCamelCase , __UpperCamelCase ) or sides < 3: raise ValueError( "area_reg_polygon() only accepts integers greater than or \ equal to three as number of sides" ) elif length < 0: raise ValueError( "area_reg_polygon() only accepts non-negative values as \ length of a side" ) return (sides * length**2) / (4 * tan(pi / sides )) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) # verbose so we can see methods missing tests print("[DEMO] Areas of various geometric shapes: \n") print(f"Rectangle: {area_rectangle(10, 20) = }") print(f"Square: {area_square(10) = }") print(f"Triangle: {area_triangle(10, 10) = }") print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }") print(f"Parallelogram: {area_parallelogram(10, 20) = }") print(f"Rhombus: {area_rhombus(10, 20) = }") print(f"Trapezium: {area_trapezium(10, 20, 30) = }") print(f"Circle: {area_circle(20) = }") print(f"Ellipse: {area_ellipse(10, 20) = }") print("\nSurface Areas of various geometric shapes: \n") print(f"Cube: {surface_area_cube(20) = }") print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }") print(f"Sphere: {surface_area_sphere(20) = }") print(f"Hemisphere: {surface_area_hemisphere(20) = }") print(f"Cone: {surface_area_cone(10, 20) = }") print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }") print(f"Cylinder: {surface_area_cylinder(10, 20) = }") print(f"Torus: {surface_area_torus(20, 10) = }") print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }") print(f"Square: {area_reg_polygon(4, 10) = }") print(f"Regular Pentagon: {area_reg_polygon(5, 10) = }")
class lowerCamelCase : """simple docstring""" def __init__( self : Tuple ) -> Optional[Any]: SCREAMING_SNAKE_CASE_ = {} def __A ( self : Tuple ) -> None: print(self.vertex ) for i in self.vertex: print(__magic_name__ , " -> " , " -> ".join([str(__magic_name__ ) for j in self.vertex[i]] ) ) def __A ( self : List[str] , __magic_name__ : int , __magic_name__ : int ) -> None: # check if vertex is already present, if from_vertex in self.vertex: self.vertex[from_vertex].append(__magic_name__ ) else: # else make a new vertex SCREAMING_SNAKE_CASE_ = [to_vertex] def __A ( self : List[Any] ) -> None: # visited array for storing already visited nodes SCREAMING_SNAKE_CASE_ = [False] * len(self.vertex ) # call the recursive helper function for i in range(len(self.vertex ) ): if not visited[i]: self.dfs_recursive(__magic_name__ , __magic_name__ ) def __A ( self : Tuple , __magic_name__ : int , __magic_name__ : list ) -> None: # mark start vertex as visited SCREAMING_SNAKE_CASE_ = True print(__magic_name__ , end=" " ) # Recur for all the vertices that are adjacent to this node for i in self.vertex: if not visited[i]: self.dfs_recursive(__magic_name__ , __magic_name__ ) if __name__ == "__main__": A : Tuple = Graph() g.add_edge(0, 1) g.add_edge(0, 2) g.add_edge(1, 2) g.add_edge(2, 0) g.add_edge(2, 3) g.add_edge(3, 3) g.print_graph() print("DFS:") g.dfs() # OUTPUT: # 0 -> 1 -> 2 # 1 -> 2 # 2 -> 0 -> 3 # 3 -> 3 # DFS: # 0 1 2 3
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging A : List[str] = logging.get_logger(__name__) A : int = { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json", # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCamelCase__ = '''blenderbot-small''' lowerCamelCase__ = ['''past_key_values'''] lowerCamelCase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : Dict , __magic_name__ : Dict=50_265 , __magic_name__ : str=512 , __magic_name__ : List[Any]=8 , __magic_name__ : Any=2_048 , __magic_name__ : Dict=16 , __magic_name__ : Any=8 , __magic_name__ : Optional[int]=2_048 , __magic_name__ : Dict=16 , __magic_name__ : Tuple=0.0 , __magic_name__ : Dict=0.0 , __magic_name__ : Optional[int]=True , __magic_name__ : Any=True , __magic_name__ : Dict="gelu" , __magic_name__ : Tuple=512 , __magic_name__ : List[str]=0.1 , __magic_name__ : List[Any]=0.0 , __magic_name__ : List[Any]=0.0 , __magic_name__ : Tuple=0.02 , __magic_name__ : Union[str, Any]=1 , __magic_name__ : List[Any]=False , __magic_name__ : str=0 , __magic_name__ : Dict=1 , __magic_name__ : str=2 , __magic_name__ : Union[str, Any]=2 , **__magic_name__ : Optional[Any] , ) -> Optional[Any]: SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = d_model SCREAMING_SNAKE_CASE_ = encoder_ffn_dim SCREAMING_SNAKE_CASE_ = encoder_layers SCREAMING_SNAKE_CASE_ = encoder_attention_heads SCREAMING_SNAKE_CASE_ = decoder_ffn_dim SCREAMING_SNAKE_CASE_ = decoder_layers SCREAMING_SNAKE_CASE_ = decoder_attention_heads SCREAMING_SNAKE_CASE_ = dropout SCREAMING_SNAKE_CASE_ = attention_dropout SCREAMING_SNAKE_CASE_ = activation_dropout SCREAMING_SNAKE_CASE_ = activation_function SCREAMING_SNAKE_CASE_ = init_std SCREAMING_SNAKE_CASE_ = encoder_layerdrop SCREAMING_SNAKE_CASE_ = decoder_layerdrop SCREAMING_SNAKE_CASE_ = use_cache SCREAMING_SNAKE_CASE_ = encoder_layers SCREAMING_SNAKE_CASE_ = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , is_encoder_decoder=__magic_name__ , decoder_start_token_id=__magic_name__ , forced_eos_token_id=__magic_name__ , **__magic_name__ , ) class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" @property def __A ( self : str ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: SCREAMING_SNAKE_CASE_ = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: SCREAMING_SNAKE_CASE_ = {0: "batch"} SCREAMING_SNAKE_CASE_ = {0: "batch", 1: "past_decoder_sequence + sequence"} else: SCREAMING_SNAKE_CASE_ = {0: "batch", 1: "decoder_sequence"} SCREAMING_SNAKE_CASE_ = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(__magic_name__ , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. 
SCREAMING_SNAKE_CASE_ = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_layers for i in range(__magic_name__ ): SCREAMING_SNAKE_CASE_ = {0: "batch", 2: "past_sequence + sequence"} SCREAMING_SNAKE_CASE_ = {0: "batch", 2: "past_sequence + sequence"} else: SCREAMING_SNAKE_CASE_ = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def __A ( self : Tuple ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: SCREAMING_SNAKE_CASE_ = super().outputs else: SCREAMING_SNAKE_CASE_ = super(__magic_name__ , self ).outputs if self.use_past: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_layers for i in range(__magic_name__ ): SCREAMING_SNAKE_CASE_ = {0: "batch", 2: "past_sequence + sequence"} SCREAMING_SNAKE_CASE_ = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def __A ( self : int , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]: SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) # Generate decoder inputs SCREAMING_SNAKE_CASE_ = seq_length if not self.use_past else 1 SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) SCREAMING_SNAKE_CASE_ = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} SCREAMING_SNAKE_CASE_ = dict(**__magic_name__ , **__magic_name__ ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = common_inputs["input_ids"].shape SCREAMING_SNAKE_CASE_ = common_inputs["decoder_input_ids"].shape[1] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_attention_heads SCREAMING_SNAKE_CASE_ = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) SCREAMING_SNAKE_CASE_ = decoder_seq_length + 3 SCREAMING_SNAKE_CASE_ = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) SCREAMING_SNAKE_CASE_ = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(__magic_name__ , __magic_name__ )] , dim=1 ) SCREAMING_SNAKE_CASE_ = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_layers SCREAMING_SNAKE_CASE_ = min(__magic_name__ , __magic_name__ ) SCREAMING_SNAKE_CASE_ = max(__magic_name__ , __magic_name__ ) - min_num_layers SCREAMING_SNAKE_CASE_ = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(__magic_name__ ): common_inputs["past_key_values"].append( ( torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ ), ) ) # TODO: test this. 
SCREAMING_SNAKE_CASE_ = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(__magic_name__ , __magic_name__ ): common_inputs["past_key_values"].append((torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) ) return common_inputs def __A ( self : Union[str, Any] , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]: SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = common_inputs["input_ids"].shape # Not using the same length for past_key_values SCREAMING_SNAKE_CASE_ = seqlen + 2 SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_layers SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_attention_heads SCREAMING_SNAKE_CASE_ = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) SCREAMING_SNAKE_CASE_ = common_inputs["attention_mask"].dtype SCREAMING_SNAKE_CASE_ = torch.cat( [common_inputs["attention_mask"], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 ) SCREAMING_SNAKE_CASE_ = [ (torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(__magic_name__ ) ] return common_inputs def __A ( self : Dict , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX SCREAMING_SNAKE_CASE_ = compute_effective_axis_dimension( __magic_name__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX SCREAMING_SNAKE_CASE_ = tokenizer.num_special_tokens_to_add(__magic_name__ ) SCREAMING_SNAKE_CASE_ = compute_effective_axis_dimension( __magic_name__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__magic_name__ ) # Generate dummy inputs according to compute batch and sequence SCREAMING_SNAKE_CASE_ = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size SCREAMING_SNAKE_CASE_ = dict(tokenizer(__magic_name__ , return_tensors=__magic_name__ ) ) return common_inputs def __A ( self : Optional[Any] , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ ) elif self.task == "causal-lm": SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_causal_lm( __magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ ) else: SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ ) return common_inputs def __A ( self : Optional[Any] , __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : List[str] ) -> List[str]: if self.task in ["default", "seq2seq-lm"]: SCREAMING_SNAKE_CASE_ = super()._flatten_past_key_values_(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) else: SCREAMING_SNAKE_CASE_ = super(__magic_name__ , self )._flatten_past_key_values_( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING import numpy as np import pyarrow as pa from .. import config from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import torch class lowerCamelCase (TensorFormatter[Mapping, '''torch.Tensor''', Mapping] ): """simple docstring""" def __init__( self : Optional[int] , __magic_name__ : Optional[int]=None , **__magic_name__ : Tuple ) -> str: super().__init__(features=__magic_name__ ) SCREAMING_SNAKE_CASE_ = torch_tensor_kwargs import torch # noqa import torch at initialization def __A ( self : Any , __magic_name__ : str ) -> List[Any]: import torch if isinstance(__magic_name__ , __magic_name__ ) and column: if all( isinstance(__magic_name__ , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return torch.stack(__magic_name__ ) return column def __A ( self : int , __magic_name__ : str ) -> Tuple: import torch if isinstance(__magic_name__ , (str, bytes, type(__magic_name__ )) ): return value elif isinstance(__magic_name__ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() SCREAMING_SNAKE_CASE_ = {} if isinstance(__magic_name__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): SCREAMING_SNAKE_CASE_ = {"dtype": torch.intaa} elif isinstance(__magic_name__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): SCREAMING_SNAKE_CASE_ = {"dtype": torch.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(__magic_name__ , PIL.Image.Image ): SCREAMING_SNAKE_CASE_ = np.asarray(__magic_name__ ) return torch.tensor(__magic_name__ , **{**default_dtype, **self.torch_tensor_kwargs} ) def __A ( self : List[str] , __magic_name__ : List[Any] ) -> Any: import torch # support for torch, tf, jax etc. 
if hasattr(__magic_name__ , "__array__" ) and not isinstance(__magic_name__ , torch.Tensor ): SCREAMING_SNAKE_CASE_ = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(__magic_name__ , np.ndarray ): if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects return self._consolidate([self.recursive_tensorize(__magic_name__ ) for substruct in data_struct] ) elif isinstance(__magic_name__ , (list, tuple) ): return self._consolidate([self.recursive_tensorize(__magic_name__ ) for substruct in data_struct] ) return self._tensorize(__magic_name__ ) def __A ( self : Any , __magic_name__ : dict ) -> Tuple: return map_nested(self._recursive_tensorize , __magic_name__ , map_list=__magic_name__ ) def __A ( self : Tuple , __magic_name__ : pa.Table ) -> Mapping: SCREAMING_SNAKE_CASE_ = self.numpy_arrow_extractor().extract_row(__magic_name__ ) SCREAMING_SNAKE_CASE_ = self.python_features_decoder.decode_row(__magic_name__ ) return self.recursive_tensorize(__magic_name__ ) def __A ( self : Optional[Any] , __magic_name__ : pa.Table ) -> "torch.Tensor": SCREAMING_SNAKE_CASE_ = self.numpy_arrow_extractor().extract_column(__magic_name__ ) SCREAMING_SNAKE_CASE_ = self.python_features_decoder.decode_column(__magic_name__ , pa_table.column_names[0] ) SCREAMING_SNAKE_CASE_ = self.recursive_tensorize(__magic_name__ ) SCREAMING_SNAKE_CASE_ = self._consolidate(__magic_name__ ) return column def __A ( self : int , __magic_name__ : pa.Table ) -> Mapping: SCREAMING_SNAKE_CASE_ = self.numpy_arrow_extractor().extract_batch(__magic_name__ ) SCREAMING_SNAKE_CASE_ = self.python_features_decoder.decode_batch(__magic_name__ ) SCREAMING_SNAKE_CASE_ = self.recursive_tensorize(__magic_name__ ) for column_name in batch: SCREAMING_SNAKE_CASE_ = self._consolidate(batch[column_name] ) return batch
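# Usage sketch (hedged: this goes through the public `datasets` API, which routes
# "torch" formatting to the formatter implemented above):
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("torch")
#     print(ds[0]["x"])  # tensor([1, 2])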
import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class lowerCamelCase : """simple docstring""" def __init__( self : List[Any] , __magic_name__ : List[str] , __magic_name__ : int=100 , __magic_name__ : Optional[Any]=13 , __magic_name__ : Dict=30 , __magic_name__ : Tuple=2 , __magic_name__ : str=3 , __magic_name__ : str=True , __magic_name__ : Optional[int]=True , __magic_name__ : Union[str, Any]=32 , __magic_name__ : Optional[int]=4 , __magic_name__ : Dict=4 , __magic_name__ : Tuple=37 , __magic_name__ : Any="gelu" , __magic_name__ : int=0.1 , __magic_name__ : List[str]=0.1 , __magic_name__ : Optional[int]=10 , __magic_name__ : Tuple=0.02 , __magic_name__ : Optional[int]=3 , __magic_name__ : List[str]=None , __magic_name__ : Tuple=[0, 1, 2, 3] , ) -> List[str]: SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = 100 SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = image_size SCREAMING_SNAKE_CASE_ = patch_size SCREAMING_SNAKE_CASE_ = num_channels SCREAMING_SNAKE_CASE_ = is_training SCREAMING_SNAKE_CASE_ = use_labels SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = hidden_dropout_prob SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = type_sequence_label_size SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = scope SCREAMING_SNAKE_CASE_ = out_indices SCREAMING_SNAKE_CASE_ = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) SCREAMING_SNAKE_CASE_ = (image_size // patch_size) ** 2 SCREAMING_SNAKE_CASE_ = num_patches + 1 def __A ( self : Any ) -> int: SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = None if self.use_labels: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) SCREAMING_SNAKE_CASE_ = self.get_config() return config, pixel_values, labels, pixel_labels def __A ( self : Dict ) -> Optional[int]: return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , 
hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__magic_name__ , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def __A ( self : Optional[int] , __magic_name__ : List[str] , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : Tuple ) -> Optional[int]: SCREAMING_SNAKE_CASE_ = BeitModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() SCREAMING_SNAKE_CASE_ = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : str ) -> int: SCREAMING_SNAKE_CASE_ = BeitForMaskedImageModeling(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() SCREAMING_SNAKE_CASE_ = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def __A ( self : Dict , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Optional[Any] ) -> int: SCREAMING_SNAKE_CASE_ = self.type_sequence_label_size SCREAMING_SNAKE_CASE_ = BeitForImageClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() SCREAMING_SNAKE_CASE_ = model(__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = BeitForImageClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE_ = model(__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __A ( self : Tuple , __magic_name__ : Any , __magic_name__ : Optional[Any] , __magic_name__ : Tuple , __magic_name__ : int ) -> int: SCREAMING_SNAKE_CASE_ = self.num_labels SCREAMING_SNAKE_CASE_ = BeitForSemanticSegmentation(__magic_name__ ) model.to(__magic_name__ ) model.eval() SCREAMING_SNAKE_CASE_ = model(__magic_name__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) SCREAMING_SNAKE_CASE_ = model(__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def __A ( self : str ) -> List[Any]: SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs SCREAMING_SNAKE_CASE_ = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" lowerCamelCase__ = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) lowerCamelCase__ = ( { '''feature-extraction''': BeitModel, '''image-classification''': BeitForImageClassification, '''image-segmentation''': BeitForSemanticSegmentation, } if is_torch_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False def __A ( self : Tuple ) -> Any: SCREAMING_SNAKE_CASE_ = BeitModelTester(self ) SCREAMING_SNAKE_CASE_ = 
ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 ) def __A ( self : Dict ) -> List[Any]: self.config_tester.run_common_tests() @unittest.skip(reason="BEiT does not use inputs_embeds" ) def __A ( self : List[str] ) -> Optional[Any]: pass @require_torch_multi_gpu @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def __A ( self : str ) -> List[str]: pass def __A ( self : List[Any] ) -> List[str]: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) SCREAMING_SNAKE_CASE_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) ) def __A ( self : Union[str, Any] ) -> int: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ ) SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , __magic_name__ ) def __A ( self : Tuple ) -> Union[str, Any]: SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def __A ( self : Dict ) -> Union[str, Any]: SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__magic_name__ ) def __A ( self : Optional[int] ) -> List[Any]: SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__magic_name__ ) def __A ( self : Optional[Any] ) -> List[str]: SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__magic_name__ ) def __A ( self : int ) -> Optional[int]: if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(__magic_name__ ), BeitForMaskedImageModeling]: continue SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ ) model.to(__magic_name__ ) model.train() SCREAMING_SNAKE_CASE_ = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ ) SCREAMING_SNAKE_CASE_ = model(**__magic_name__ ).loss loss.backward() def __A ( self : Any ) -> Tuple: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(__magic_name__ ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ ) model.gradient_checkpointing_enable() model.to(__magic_name__ ) model.train() SCREAMING_SNAKE_CASE_ = self._prepare_for_class(__magic_name__ , __magic_name__ , 
return_labels=__magic_name__ ) SCREAMING_SNAKE_CASE_ = model(**__magic_name__ ).loss loss.backward() def __A ( self : List[str] ) -> str: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ = _config_zero_init(__magic_name__ ) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ = model_class(config=__magic_name__ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @slow def __A ( self : int ) -> Optional[int]: for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ = BeitModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def a__ ( ): SCREAMING_SNAKE_CASE_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCamelCase (unittest.TestCase ): """simple docstring""" @cached_property def __A ( self : List[Any] ) -> str: return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def __A ( self : List[str] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE_ = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(__magic_name__ ) SCREAMING_SNAKE_CASE_ = self.default_image_processor SCREAMING_SNAKE_CASE_ = prepare_img() SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).pixel_values.to(__magic_name__ ) # prepare bool_masked_pos SCREAMING_SNAKE_CASE_ = torch.ones((1, 196) , dtype=torch.bool ).to(__magic_name__ ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(pixel_values=__magic_name__ , bool_masked_pos=__magic_name__ ) SCREAMING_SNAKE_CASE_ = outputs.logits # verify the logits SCREAMING_SNAKE_CASE_ = torch.Size((1, 196, 8_192) ) self.assertEqual(logits.shape , __magic_name__ ) SCREAMING_SNAKE_CASE_ = torch.tensor( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(__magic_name__ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __magic_name__ , atol=1e-2 ) ) @slow def __A ( self : Tuple ) -> int: SCREAMING_SNAKE_CASE_ = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(__magic_name__ ) SCREAMING_SNAKE_CASE_ = self.default_image_processor SCREAMING_SNAKE_CASE_ = prepare_img() SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(**__magic_name__ ) SCREAMING_SNAKE_CASE_ = outputs.logits # verify the logits SCREAMING_SNAKE_CASE_ = torch.Size((1, 1_000) ) self.assertEqual(logits.shape , __magic_name__ ) SCREAMING_SNAKE_CASE_ = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(__magic_name__ ) self.assertTrue(torch.allclose(logits[0, :3] , __magic_name__ , atol=1e-4 ) ) SCREAMING_SNAKE_CASE_ = 281 self.assertEqual(logits.argmax(-1 ).item() , __magic_name__ ) @slow def __A ( self : Optional[Any] ) -> int: SCREAMING_SNAKE_CASE_ = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to( __magic_name__ ) SCREAMING_SNAKE_CASE_ = self.default_image_processor SCREAMING_SNAKE_CASE_ = prepare_img() 
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(**__magic_name__ ) SCREAMING_SNAKE_CASE_ = outputs.logits # verify the logits SCREAMING_SNAKE_CASE_ = torch.Size((1, 21_841) ) self.assertEqual(logits.shape , __magic_name__ ) SCREAMING_SNAKE_CASE_ = torch.tensor([1.6881, -0.2787, 0.5901] ).to(__magic_name__ ) self.assertTrue(torch.allclose(logits[0, :3] , __magic_name__ , atol=1e-4 ) ) SCREAMING_SNAKE_CASE_ = 2_396 self.assertEqual(logits.argmax(-1 ).item() , __magic_name__ ) @slow def __A ( self : Tuple ) -> Tuple: SCREAMING_SNAKE_CASE_ = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) SCREAMING_SNAKE_CASE_ = model.to(__magic_name__ ) SCREAMING_SNAKE_CASE_ = BeitImageProcessor(do_resize=__magic_name__ , size=640 , do_center_crop=__magic_name__ ) SCREAMING_SNAKE_CASE_ = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) SCREAMING_SNAKE_CASE_ = Image.open(ds[0]["file"] ) SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(**__magic_name__ ) SCREAMING_SNAKE_CASE_ = outputs.logits # verify the logits SCREAMING_SNAKE_CASE_ = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape , __magic_name__ ) SCREAMING_SNAKE_CASE_ = version.parse(PIL.__version__ ) < version.parse("9.0.0" ) if is_pillow_less_than_a: SCREAMING_SNAKE_CASE_ = torch.tensor( [ [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]], [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]], [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]], ] , device=__magic_name__ , ) else: SCREAMING_SNAKE_CASE_ = torch.tensor( [ [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]], [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]], [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]], ] , device=__magic_name__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __magic_name__ , atol=1e-4 ) ) @slow def __A ( self : List[str] ) -> Tuple: SCREAMING_SNAKE_CASE_ = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) SCREAMING_SNAKE_CASE_ = model.to(__magic_name__ ) SCREAMING_SNAKE_CASE_ = BeitImageProcessor(do_resize=__magic_name__ , size=640 , do_center_crop=__magic_name__ ) SCREAMING_SNAKE_CASE_ = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) SCREAMING_SNAKE_CASE_ = Image.open(ds[0]["file"] ) SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(**__magic_name__ ) SCREAMING_SNAKE_CASE_ = outputs.logits.detach().cpu() SCREAMING_SNAKE_CASE_ = image_processor.post_process_semantic_segmentation(outputs=__magic_name__ , target_sizes=[(500, 300)] ) SCREAMING_SNAKE_CASE_ = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , __magic_name__ ) SCREAMING_SNAKE_CASE_ = image_processor.post_process_semantic_segmentation(outputs=__magic_name__ ) SCREAMING_SNAKE_CASE_ = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape , __magic_name__ )
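# A short usage sketch of the pipeline exercised by the integration tests above.
# It downloads the public checkpoint and a sample image on first run, so it is a
# standalone demo rather than part of the test suite.
import requests
import torch
from PIL import Image
from transformers import BeitForImageClassification, BeitImageProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
print(model.config.id2label[logits.argmax(-1).item()])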
"""simple docstring""" from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_herbert import HerbertTokenizer A : Dict = logging.get_logger(__name__) A : int = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} A : str = { "vocab_file": { "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json" }, "merges_file": { "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt" }, } A : Optional[Any] = {"allegro/herbert-base-cased": 5_14} A : Optional[Any] = {} class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_INIT_CONFIGURATION lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = HerbertTokenizer def __init__( self : List[str] , __magic_name__ : Union[str, Any]=None , __magic_name__ : List[Any]=None , __magic_name__ : List[Any]=None , __magic_name__ : Tuple="<s>" , __magic_name__ : Tuple="<unk>" , __magic_name__ : Union[str, Any]="<pad>" , __magic_name__ : int="<mask>" , __magic_name__ : List[str]="</s>" , **__magic_name__ : List[Any] , ) -> List[Any]: super().__init__( __magic_name__ , __magic_name__ , tokenizer_file=__magic_name__ , cls_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , sep_token=__magic_name__ , **__magic_name__ , ) def __A ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]: SCREAMING_SNAKE_CASE_ = [self.cls_token_id] SCREAMING_SNAKE_CASE_ = [self.sep_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __A ( self : Optional[int] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None , __magic_name__ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ ) if token_ids_a is None: return [1] + ([0] * len(__magic_name__ )) + [1] return [1] + ([0] * len(__magic_name__ )) + [1] + ([0] * len(__magic_name__ )) + [1] def __A ( self : Union[str, Any] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]: SCREAMING_SNAKE_CASE_ = [self.sep_token_id] SCREAMING_SNAKE_CASE_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __A ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]: SCREAMING_SNAKE_CASE_ = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ ) return tuple(__magic_name__ )
from __future__ import annotations

END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
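# A quick usage sketch against the trie built above (using the names restored in
# this listing); the trailing space on each match comes from the END marker.
trie.insert_word("detour")
print(autocomplete_using_trie("det"))  # ('detergent ', 'detour ')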
import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging A : Optional[int] = logging.get_logger(__name__) logging.set_verbosity_info() def a__ ( __UpperCamelCase , __UpperCamelCase ): if "xprophetnet" in prophetnet_checkpoint_path: SCREAMING_SNAKE_CASE_ = XLMProphetNetForConditionalGenerationOld.from_pretrained(__UpperCamelCase ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = XLMProphetNetForConditionalGeneration.from_pretrained( __UpperCamelCase , output_loading_info=__UpperCamelCase ) else: SCREAMING_SNAKE_CASE_ = ProphetNetForConditionalGenerationOld.from_pretrained(__UpperCamelCase ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = ProphetNetForConditionalGeneration.from_pretrained( __UpperCamelCase , output_loading_info=__UpperCamelCase ) SCREAMING_SNAKE_CASE_ = ["key_proj", "value_proj", "query_proj"] SCREAMING_SNAKE_CASE_ = { "self_attn": "ngram_self_attn", "cross_attn": "encoder_attn", "cross_attn_layer_norm": "encoder_attn_layer_norm", "feed_forward_layer_norm": "final_layer_norm", "feed_forward": "", "intermediate": "fc1", "output": "fc2", "key_proj": "k_proj", "query_proj": "q_proj", "value_proj": "v_proj", "word_embeddings": "embed_tokens", "embeddings_layer_norm": "emb_layer_norm", "relative_pos_embeddings": "relative_linear", "ngram_embeddings": "ngram_input_embed", "position_embeddings": "embed_positions", } for key in loading_info["missing_keys"]: SCREAMING_SNAKE_CASE_ = key.split("." ) if attributes[0] == "lm_head": SCREAMING_SNAKE_CASE_ = prophet SCREAMING_SNAKE_CASE_ = prophet_old else: SCREAMING_SNAKE_CASE_ = prophet.prophetnet SCREAMING_SNAKE_CASE_ = prophet_old.model SCREAMING_SNAKE_CASE_ = False for attribute in attributes: if attribute in mapping: SCREAMING_SNAKE_CASE_ = mapping[attribute] if not hasattr(__UpperCamelCase , __UpperCamelCase ) and len(__UpperCamelCase ) > 0: SCREAMING_SNAKE_CASE_ = attribute elif hasattr(__UpperCamelCase , __UpperCamelCase ): SCREAMING_SNAKE_CASE_ = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" SCREAMING_SNAKE_CASE_ = old_model.weight logger.info(F'''{attribute} is initialized.''' ) SCREAMING_SNAKE_CASE_ = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
                SCREAMING_SNAKE_CASE_ = old_model.bias
                logger.info(F'''{attribute} is initialized''' )
                SCREAMING_SNAKE_CASE_ = True
                break
            elif attribute in special_keys and hasattr(__UpperCamelCase , "in_proj_weight" ):
                SCREAMING_SNAKE_CASE_ = old_model.in_proj_weight.shape[0] // 3
                SCREAMING_SNAKE_CASE_ = getattr(__UpperCamelCase , __UpperCamelCase )
                # these two checks were bare comparisons (no-ops as written); they
                # only take effect as assertions
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    SCREAMING_SNAKE_CASE_ = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    SCREAMING_SNAKE_CASE_ = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
                elif attribute == "key_proj":
                    SCREAMING_SNAKE_CASE_ = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    SCREAMING_SNAKE_CASE_ = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    SCREAMING_SNAKE_CASE_ = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    SCREAMING_SNAKE_CASE_ = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                SCREAMING_SNAKE_CASE_ = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 5_1_2, "We want 512 position_embeddings."
                SCREAMING_SNAKE_CASE_ = nn.Parameter(old_model.embed_positions.weight[:5_1_2, :] )
                SCREAMING_SNAKE_CASE_ = True
                break
            if attribute.isdigit():
                SCREAMING_SNAKE_CASE_ = model[int(__UpperCamelCase )]
                SCREAMING_SNAKE_CASE_ = old_model[int(__UpperCamelCase )]
            else:
                SCREAMING_SNAKE_CASE_ = getattr(__UpperCamelCase , __UpperCamelCase )
                if old_attribute == "":
                    SCREAMING_SNAKE_CASE_ = old_model
                else:
                    if not hasattr(__UpperCamelCase , __UpperCamelCase ):
                        raise ValueError(F'''{old_model} does not have {old_attribute}''' )
                    SCREAMING_SNAKE_CASE_ = getattr(__UpperCamelCase , __UpperCamelCase )
        if not is_key_init:
            raise ValueError(F'''{key} was not correctly initialized!''' )
    print(F'''Saving model to {pytorch_dump_folder_path}''' )
    prophet.save_pretrained(__UpperCamelCase )


if __name__ == "__main__":
    A : Optional[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    A : Union[str, Any] = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
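# A self-contained sketch of the query/key/value split performed above: the old
# attention stores q, k and v stacked in one `in_proj_weight`, while the new model
# keeps three separate projections. Random tensors stand in for real weights.
import torch

embed_dim = 4
in_proj_weight = torch.randn(3 * embed_dim, embed_dim)
q_w = in_proj_weight[:embed_dim, :]
k_w = in_proj_weight[embed_dim : 2 * embed_dim, :]
v_w = in_proj_weight[2 * embed_dim :, :]
assert q_w.shape == k_w.shape == v_w.shape == (embed_dim, embed_dim)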
from collections import deque class lowerCamelCase : """simple docstring""" def __init__( self : str , __magic_name__ : str , __magic_name__ : int , __magic_name__ : int ) -> None: SCREAMING_SNAKE_CASE_ = process_name # process name SCREAMING_SNAKE_CASE_ = arrival_time # arrival time of the process # completion time of finished process or last interrupted time SCREAMING_SNAKE_CASE_ = arrival_time SCREAMING_SNAKE_CASE_ = burst_time # remaining burst time SCREAMING_SNAKE_CASE_ = 0 # total time of the process wait in ready queue SCREAMING_SNAKE_CASE_ = 0 # time from arrival time to completion time class lowerCamelCase : """simple docstring""" def __init__( self : Tuple , __magic_name__ : int , __magic_name__ : list[int] , __magic_name__ : deque[Process] , __magic_name__ : int , ) -> None: # total number of mlfq's queues SCREAMING_SNAKE_CASE_ = number_of_queues # time slice of queues that round robin algorithm applied SCREAMING_SNAKE_CASE_ = time_slices # unfinished process is in this ready_queue SCREAMING_SNAKE_CASE_ = queue # current time SCREAMING_SNAKE_CASE_ = current_time # finished process is in this sequence queue SCREAMING_SNAKE_CASE_ = deque() def __A ( self : Dict ) -> list[str]: SCREAMING_SNAKE_CASE_ = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def __A ( self : List[str] , __magic_name__ : list[Process] ) -> list[int]: SCREAMING_SNAKE_CASE_ = [] for i in range(len(__magic_name__ ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def __A ( self : List[str] , __magic_name__ : list[Process] ) -> list[int]: SCREAMING_SNAKE_CASE_ = [] for i in range(len(__magic_name__ ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def __A ( self : Tuple , __magic_name__ : list[Process] ) -> list[int]: SCREAMING_SNAKE_CASE_ = [] for i in range(len(__magic_name__ ) ): completion_times.append(queue[i].stop_time ) return completion_times def __A ( self : str , __magic_name__ : deque[Process] ) -> list[int]: return [q.burst_time for q in queue] def __A ( self : Optional[Any] , __magic_name__ : Process ) -> int: process.waiting_time += self.current_time - process.stop_time return process.waiting_time def __A ( self : Optional[Any] , __magic_name__ : deque[Process] ) -> deque[Process]: SCREAMING_SNAKE_CASE_ = deque() # sequence deque of finished process while len(__magic_name__ ) != 0: SCREAMING_SNAKE_CASE_ = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(__magic_name__ ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 SCREAMING_SNAKE_CASE_ = 0 # set the process's turnaround time because it is finished SCREAMING_SNAKE_CASE_ = self.current_time - cp.arrival_time # set the completion time SCREAMING_SNAKE_CASE_ = self.current_time # add the process to queue that has finished queue finished.append(__magic_name__ ) self.finish_queue.extend(__magic_name__ ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def __A ( self : Any , __magic_name__ : deque[Process] , __magic_name__ : int ) -> tuple[deque[Process], deque[Process]]: SCREAMING_SNAKE_CASE_ = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in 
range(len(__magic_name__ ) ): SCREAMING_SNAKE_CASE_ = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(__magic_name__ ) # if the burst time of process is bigger than time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time SCREAMING_SNAKE_CASE_ = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(__magic_name__ ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished SCREAMING_SNAKE_CASE_ = 0 # set the finish time SCREAMING_SNAKE_CASE_ = self.current_time # update the process' turnaround time because it is finished SCREAMING_SNAKE_CASE_ = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(__magic_name__ ) self.finish_queue.extend(__magic_name__ ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def __A ( self : Any ) -> deque[Process]: # all queues except last one have round_robin algorithm for i in range(self.number_of_queues - 1 ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest A : Dict = Process("P1", 0, 53) A : str = Process("P2", 0, 17) A : List[Any] = Process("P3", 0, 68) A : List[str] = Process("P4", 0, 24) A : Dict = 3 A : Any = [17, 25] A : Dict = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])}) A : Union[str, Any] = Process("P1", 0, 53) A : Any = Process("P2", 0, 17) A : Dict = Process("P3", 0, 68) A : List[str] = Process("P4", 0, 24) A : Optional[int] = 3 A : int = [17, 25] A : Union[str, Any] = deque([Pa, Pa, Pa, Pa]) A : Tuple = MLFQ(number_of_queues, time_slices, queue, 0) A : Tuple = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( f"waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}" ) # print completion times of processes(P1, P2, P3, P4) print( f"completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}" ) # print total turnaround times of processes(P1, P2, P3, P4) print( f"turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}" ) # print sequence of finished processes print( f"sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}" )
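# A minimal round-robin sketch of the time-slice rule used above: a process whose
# remaining burst exceeds the slice runs for one slice and requeues, otherwise it
# finishes. Plain (name, burst) tuples stand in for the Process objects.
from collections import deque

def round_robin_sketch(jobs, time_slice):
    queue, order, clock = deque(jobs), [], 0
    while queue:
        name, burst = queue.popleft()
        if burst > time_slice:
            clock += time_slice
            queue.append((name, burst - time_slice))
        else:
            clock += burst
            order.append((name, clock))  # (process, completion time)
    return order

print(round_robin_sketch([("P1", 5), ("P2", 3)], 2))  # [('P2', 7), ('P1', 8)]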
"""simple docstring""" from __future__ import annotations from collections.abc import Callable from typing import Any, Generic, TypeVar A : List[Any] = TypeVar("T") class lowerCamelCase (Generic[T] ): """simple docstring""" def __init__( self : Optional[int] , __magic_name__ : list[T] , __magic_name__ : Callable[[T, T], T] ) -> None: SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = len(__magic_name__ ) SCREAMING_SNAKE_CASE_ = [any_type for _ in range(self.N )] + arr SCREAMING_SNAKE_CASE_ = fnc self.build() def __A ( self : Tuple ) -> None: for p in range(self.N - 1 , 0 , -1 ): SCREAMING_SNAKE_CASE_ = self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def __A ( self : Optional[Any] , __magic_name__ : int , __magic_name__ : T ) -> None: p += self.N SCREAMING_SNAKE_CASE_ = v while p > 1: SCREAMING_SNAKE_CASE_ = p // 2 SCREAMING_SNAKE_CASE_ = self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def __A ( self : int , __magic_name__ : int , __magic_name__ : int ) -> T | None: # noqa: E741 SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = l + self.N, r + self.N SCREAMING_SNAKE_CASE_ = None while l <= r: if l % 2 == 1: SCREAMING_SNAKE_CASE_ = self.st[l] if res is None else self.fn(__magic_name__ , self.st[l] ) if r % 2 == 0: SCREAMING_SNAKE_CASE_ = self.st[r] if res is None else self.fn(__magic_name__ , self.st[r] ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = (l + 1) // 2, (r - 1) // 2 return res if __name__ == "__main__": from functools import reduce A : Tuple = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12] A : Optional[int] = { 0: 7, 1: 2, 2: 6, 3: -14, 4: 5, 5: 4, 6: 7, 7: -10, 8: 9, 9: 10, 10: 12, 11: 1, } A : Dict = SegmentTree(test_array, min) A : Optional[int] = SegmentTree(test_array, max) A : Any = SegmentTree(test_array, lambda a, b: a + b) def a__ ( ): for i in range(len(__UpperCamelCase ) ): for j in range(__UpperCamelCase , len(__UpperCamelCase ) ): SCREAMING_SNAKE_CASE_ = reduce(__UpperCamelCase , test_array[i : j + 1] ) SCREAMING_SNAKE_CASE_ = reduce(__UpperCamelCase , test_array[i : j + 1] ) SCREAMING_SNAKE_CASE_ = reduce(lambda __UpperCamelCase , __UpperCamelCase : a + b , test_array[i : j + 1] ) assert min_range == min_segment_tree.query(__UpperCamelCase , __UpperCamelCase ) assert max_range == max_segment_tree.query(__UpperCamelCase , __UpperCamelCase ) assert sum_range == sum_segment_tree.query(__UpperCamelCase , __UpperCamelCase ) test_all_segments() for index, value in test_updates.items(): A : List[Any] = value min_segment_tree.update(index, value) max_segment_tree.update(index, value) sum_segment_tree.update(index, value) test_all_segments()
import torch


def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
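# An optional extension sketch: name each visible device before reporting the count.
# torch.cuda.get_device_name is a standard torch API; the loop only runs when CUDA
# is actually available.
import torch

if torch.cuda.is_available():
    for idx in range(torch.cuda.device_count()):
        print(f"GPU {idx}: {torch.cuda.get_device_name(idx)}")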
from sklearn.metrics import matthews_corrcoef import datasets A : Optional[Any] = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n" A : Union[str, Any] = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n" A : Any = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase (datasets.Metric ): """simple docstring""" def __A ( self : Tuple ) -> Any: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("int32" ), "references": datasets.Value("int32" ), } ) , reference_urls=[ "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html" ] , ) def __A ( self : Optional[Any] , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Dict=None ) -> Optional[Any]: return { "matthews_correlation": float(matthews_corrcoef(__magic_name__ , __magic_name__ , sample_weight=__magic_name__ ) ), }
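# The metric above is a thin wrapper around scikit-learn, so the same number can be
# reproduced directly (values taken from Example 1 in the docstring).
from sklearn.metrics import matthews_corrcoef

score = matthews_corrcoef([1, 3, 2, 0, 3, 2], [1, 2, 2, 0, 3, 3])
print(round(score, 2))  # 0.54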
from collections.abc import Generator from math import sin def a__ ( __UpperCamelCase ): if len(__UpperCamelCase ) != 3_2: raise ValueError("Input must be of length 32" ) SCREAMING_SNAKE_CASE_ = b"" for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def a__ ( __UpperCamelCase ): if i < 0: raise ValueError("Input must be non-negative" ) SCREAMING_SNAKE_CASE_ = format(__UpperCamelCase , "08x" )[-8:] SCREAMING_SNAKE_CASE_ = b"" for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" ) return little_endian_hex def a__ ( __UpperCamelCase ): SCREAMING_SNAKE_CASE_ = b"" for char in message: bit_string += format(__UpperCamelCase , "08b" ).encode("utf-8" ) SCREAMING_SNAKE_CASE_ = format(len(__UpperCamelCase ) , "064b" ).encode("utf-8" ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(__UpperCamelCase ) % 5_1_2 != 4_4_8: bit_string += b"0" bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] ) return bit_string def a__ ( __UpperCamelCase ): if len(__UpperCamelCase ) % 5_1_2 != 0: raise ValueError("Input must have length that's a multiple of 512" ) for pos in range(0 , len(__UpperCamelCase ) , 5_1_2 ): SCREAMING_SNAKE_CASE_ = bit_string[pos : pos + 5_1_2] SCREAMING_SNAKE_CASE_ = [] for i in range(0 , 5_1_2 , 3_2 ): block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) ) yield block_words def a__ ( __UpperCamelCase ): if i < 0: raise ValueError("Input must be non-negative" ) SCREAMING_SNAKE_CASE_ = format(__UpperCamelCase , "032b" ) SCREAMING_SNAKE_CASE_ = "" for c in i_str: new_str += "1" if c == "0" else "0" return int(__UpperCamelCase , 2 ) def a__ ( __UpperCamelCase , __UpperCamelCase ): return (a + b) % 2**3_2 def a__ ( __UpperCamelCase , __UpperCamelCase ): if i < 0: raise ValueError("Input must be non-negative" ) if shift < 0: raise ValueError("Shift must be non-negative" ) return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2 def a__ ( __UpperCamelCase ): SCREAMING_SNAKE_CASE_ = preprocess(__UpperCamelCase ) SCREAMING_SNAKE_CASE_ = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )] # Starting states SCREAMING_SNAKE_CASE_ = 0X67452301 SCREAMING_SNAKE_CASE_ = 0Xefcdab89 SCREAMING_SNAKE_CASE_ = 0X98badcfe SCREAMING_SNAKE_CASE_ = 0X10325476 SCREAMING_SNAKE_CASE_ = [ 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(__UpperCamelCase ): SCREAMING_SNAKE_CASE_ = aa SCREAMING_SNAKE_CASE_ = ba SCREAMING_SNAKE_CASE_ = ca SCREAMING_SNAKE_CASE_ = da # Hash current chunk for i in range(6_4 ): if i <= 1_5: # f = (b & c) | (not_32(b) & d) # Alternate definition for f SCREAMING_SNAKE_CASE_ = d ^ (b & (c ^ d)) SCREAMING_SNAKE_CASE_ = i elif i <= 3_1: # f = (d & b) | (not_32(d) & c) # Alternate definition for f SCREAMING_SNAKE_CASE_ = c ^ (d & (b ^ c)) SCREAMING_SNAKE_CASE_ = (5 * i + 1) % 1_6 elif i <= 4_7: SCREAMING_SNAKE_CASE_ = b ^ c ^ d SCREAMING_SNAKE_CASE_ = (3 * i + 5) % 1_6 else: SCREAMING_SNAKE_CASE_ = c ^ (b | not_aa(__UpperCamelCase )) SCREAMING_SNAKE_CASE_ = (7 * i) % 1_6 SCREAMING_SNAKE_CASE_ = (f + a + added_consts[i] + block_words[g]) % 2**3_2 SCREAMING_SNAKE_CASE_ = d SCREAMING_SNAKE_CASE_ = c SCREAMING_SNAKE_CASE_ = b 
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , left_rotate_aa(__UpperCamelCase , shift_amounts[i] ) ) # Add hashed chunk to running total SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase ) SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase ) SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase ) SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase ) SCREAMING_SNAKE_CASE_ = reformat_hex(__UpperCamelCase ) + reformat_hex(__UpperCamelCase ) + reformat_hex(__UpperCamelCase ) + reformat_hex(__UpperCamelCase ) return digest if __name__ == "__main__": import doctest doctest.testmod()
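# A sanity-check sketch for the implementation above: a correct MD5 must agree with
# the reference `hashlib` digest. The helpers in this listing were all mangled to
# the same name `a__`, so only the reference value is computed here.
import hashlib

message = b"The quick brown fox jumps over the lazy dog"
print(hashlib.md5(message).hexdigest())  # 9e107d9d372bb6826bd81d3542a419d6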
from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class lowerCamelCase (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCamelCase__ = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias'''] @register_to_config def __init__( self : Dict , __magic_name__ : int , __magic_name__ : int , __magic_name__ : Optional[int] = None , __magic_name__ : int = 50_257 , __magic_name__ : int = 1_024 , __magic_name__ : int = 768 , __magic_name__ : int = 12 , __magic_name__ : int = 12 , __magic_name__ : Optional[int] = None , __magic_name__ : str = "gelu_new" , __magic_name__ : float = 0.1 , __magic_name__ : float = 0.1 , __magic_name__ : float = 0.1 , __magic_name__ : float = 1e-5 , __magic_name__ : float = 0.02 , __magic_name__ : bool = True , __magic_name__ : bool = True , __magic_name__ : bool = False , __magic_name__ : bool = False , ) -> Dict: super().__init__() SCREAMING_SNAKE_CASE_ = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( F'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and''' F''' `n_embd`: {n_embd} are not equal.''' ) SCREAMING_SNAKE_CASE_ = prefix_inner_dim SCREAMING_SNAKE_CASE_ = prefix_hidden_dim SCREAMING_SNAKE_CASE_ = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) SCREAMING_SNAKE_CASE_ = ( nn.Linear(self.prefix_hidden_dim , __magic_name__ ) if self.prefix_hidden_dim is not None else nn.Identity() ) SCREAMING_SNAKE_CASE_ = GPTaConfig( vocab_size=__magic_name__ , n_positions=__magic_name__ , n_embd=__magic_name__ , n_layer=__magic_name__ , n_head=__magic_name__ , n_inner=__magic_name__ , activation_function=__magic_name__ , resid_pdrop=__magic_name__ , embd_pdrop=__magic_name__ , attn_pdrop=__magic_name__ , layer_norm_epsilon=__magic_name__ , initializer_range=__magic_name__ , scale_attn_weights=__magic_name__ , use_cache=__magic_name__ , scale_attn_by_inverse_layer_idx=__magic_name__ , reorder_and_upcast_attn=__magic_name__ , ) SCREAMING_SNAKE_CASE_ = GPTaLMHeadModel(__magic_name__ ) def __A ( self : Optional[int] , __magic_name__ : torch.Tensor , __magic_name__ : torch.Tensor , __magic_name__ : Optional[torch.Tensor] = None , __magic_name__ : Optional[torch.Tensor] = None , ) -> List[Any]: SCREAMING_SNAKE_CASE_ = self.transformer.transformer.wte(__magic_name__ ) SCREAMING_SNAKE_CASE_ = self.encode_prefix(__magic_name__ ) SCREAMING_SNAKE_CASE_ = self.decode_prefix(__magic_name__ ) SCREAMING_SNAKE_CASE_ = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: SCREAMING_SNAKE_CASE_ = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) SCREAMING_SNAKE_CASE_ = torch.cat((dummy_token, input_ids) , dim=1 ) SCREAMING_SNAKE_CASE_ = self.transformer(inputs_embeds=__magic_name__ , labels=__magic_name__ , attention_mask=__magic_name__ ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def __A ( self : str , __magic_name__ : int , __magic_name__ : torch.device ) -> torch.Tensor: return torch.zeros(__magic_name__ , self.prefix_length , dtype=torch.intaa , device=__magic_name__ ) def __A ( self : str , __magic_name__ : List[str] ) -> List[Any]: return self.encode_prefix(__magic_name__ ) 
@torch.no_grad() def __A ( self : Tuple , __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] ) -> Any: SCREAMING_SNAKE_CASE_ = torch.split(__magic_name__ , 1 , dim=0 ) SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [] for feature in features: SCREAMING_SNAKE_CASE_ = self.decode_prefix(feature.to(__magic_name__ ) ) # back to the clip feature # Only support beam search for now SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.generate_beam( input_embeds=__magic_name__ , device=__magic_name__ , eos_token_id=__magic_name__ ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) SCREAMING_SNAKE_CASE_ = torch.stack(__magic_name__ ) SCREAMING_SNAKE_CASE_ = torch.stack(__magic_name__ ) return generated_tokens, generated_seq_lengths @torch.no_grad() def __A ( self : str , __magic_name__ : Optional[int]=None , __magic_name__ : List[str]=None , __magic_name__ : Union[str, Any]=None , __magic_name__ : int = 5 , __magic_name__ : int = 67 , __magic_name__ : float = 1.0 , __magic_name__ : Optional[int] = None , ) -> int: SCREAMING_SNAKE_CASE_ = eos_token_id SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = torch.ones(__magic_name__ , device=__magic_name__ , dtype=torch.int ) SCREAMING_SNAKE_CASE_ = torch.zeros(__magic_name__ , device=__magic_name__ , dtype=torch.bool ) if input_embeds is not None: SCREAMING_SNAKE_CASE_ = input_embeds else: SCREAMING_SNAKE_CASE_ = self.transformer.transformer.wte(__magic_name__ ) for i in range(__magic_name__ ): SCREAMING_SNAKE_CASE_ = self.transformer(inputs_embeds=__magic_name__ ) SCREAMING_SNAKE_CASE_ = outputs.logits SCREAMING_SNAKE_CASE_ = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) SCREAMING_SNAKE_CASE_ = logits.softmax(-1 ).log() if scores is None: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = logits.topk(__magic_name__ , -1 ) SCREAMING_SNAKE_CASE_ = generated.expand(__magic_name__ , *generated.shape[1:] ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: SCREAMING_SNAKE_CASE_ = next_tokens else: SCREAMING_SNAKE_CASE_ = tokens.expand(__magic_name__ , *tokens.shape[1:] ) SCREAMING_SNAKE_CASE_ = torch.cat((tokens, next_tokens) , dim=1 ) else: SCREAMING_SNAKE_CASE_ = -float(np.inf ) SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = scores[:, None] + logits seq_lengths[~is_stopped] += 1 SCREAMING_SNAKE_CASE_ = scores_sum / seq_lengths[:, None] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = scores_sum_average.view(-1 ).topk(__magic_name__ , -1 ) SCREAMING_SNAKE_CASE_ = next_tokens // scores_sum.shape[1] SCREAMING_SNAKE_CASE_ = seq_lengths[next_tokens_source] SCREAMING_SNAKE_CASE_ = next_tokens % scores_sum.shape[1] SCREAMING_SNAKE_CASE_ = next_tokens.unsqueeze(1 ) SCREAMING_SNAKE_CASE_ = tokens[next_tokens_source] SCREAMING_SNAKE_CASE_ = torch.cat((tokens, next_tokens) , dim=1 ) SCREAMING_SNAKE_CASE_ = generated[next_tokens_source] SCREAMING_SNAKE_CASE_ = scores_sum_average * seq_lengths SCREAMING_SNAKE_CASE_ = is_stopped[next_tokens_source] SCREAMING_SNAKE_CASE_ = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) SCREAMING_SNAKE_CASE_ = torch.cat((generated, next_token_embed) , dim=1 ) SCREAMING_SNAKE_CASE_ = is_stopped + next_tokens.eq(__magic_name__ ).squeeze() if is_stopped.all(): break SCREAMING_SNAKE_CASE_ = scores / seq_lengths SCREAMING_SNAKE_CASE_ = scores.argsort(descending=__magic_name__ ) # tokens tensors are already 
padded to max_seq_length SCREAMING_SNAKE_CASE_ = [tokens[i] for i in order] SCREAMING_SNAKE_CASE_ = torch.stack(__magic_name__ , dim=0 ) SCREAMING_SNAKE_CASE_ = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
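# A stripped-down sketch of one beam-search scoring step from `generate_beam` above:
# cumulative per-beam log-probs are averaged over sequence length, then the top
# `beam_size` (beam, token) pairs are recovered from the flattened scores. Shapes
# and values here are random stand-ins.
import torch

beam_size, vocab = 3, 5
scores_sum = torch.randn(beam_size, vocab)      # cumulative log-probs per candidate
seq_lengths = torch.tensor([4.0, 5.0, 6.0])     # current length of each beam
averaged = scores_sum / seq_lengths[:, None]
top_vals, flat_idx = averaged.view(-1).topk(beam_size)
beam_idx, token_idx = flat_idx // vocab, flat_idx % vocab
print(beam_idx.tolist(), token_idx.tolist())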
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast @require_vision class lowerCamelCase (unittest.TestCase ): """simple docstring""" def __A ( self : int ) -> Any: SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp() SCREAMING_SNAKE_CASE_ = BlipImageProcessor() SCREAMING_SNAKE_CASE_ = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" ) SCREAMING_SNAKE_CASE_ = BlipaProcessor(__magic_name__ , __magic_name__ ) processor.save_pretrained(self.tmpdirname ) def __A ( self : str , **__magic_name__ : int ) -> Union[str, Any]: return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).tokenizer def __A ( self : Dict , **__magic_name__ : List[Any] ) -> int: return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).image_processor def __A ( self : int ) -> Any: shutil.rmtree(self.tmpdirname ) def __A ( self : Dict ) -> Dict: SCREAMING_SNAKE_CASE_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] SCREAMING_SNAKE_CASE_ = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __A ( self : List[Any] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) SCREAMING_SNAKE_CASE_ = self.get_image_processor(do_normalize=__magic_name__ , padding_value=1.0 ) SCREAMING_SNAKE_CASE_ = BlipaProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__magic_name__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __magic_name__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __magic_name__ ) def __A ( self : Tuple ) -> int: SCREAMING_SNAKE_CASE_ = self.get_image_processor() SCREAMING_SNAKE_CASE_ = self.get_tokenizer() SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ = image_processor(__magic_name__ , return_tensors="np" ) SCREAMING_SNAKE_CASE_ = processor(images=__magic_name__ , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __A ( self : str ) -> Tuple: SCREAMING_SNAKE_CASE_ = self.get_image_processor() SCREAMING_SNAKE_CASE_ = self.get_tokenizer() SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) SCREAMING_SNAKE_CASE_ = "lower newer" SCREAMING_SNAKE_CASE_ = processor(text=__magic_name__ ) SCREAMING_SNAKE_CASE_ = tokenizer(__magic_name__ , return_token_type_ids=__magic_name__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self : Dict ) -> Union[str, Any]: SCREAMING_SNAKE_CASE_ = self.get_image_processor() SCREAMING_SNAKE_CASE_ = self.get_tokenizer() SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , 
image_processor=__magic_name__ ) SCREAMING_SNAKE_CASE_ = "lower newer" SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ = processor(text=__magic_name__ , images=__magic_name__ ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) # test if it raises when no input is passed with pytest.raises(__magic_name__ ): processor() def __A ( self : Dict ) -> Tuple: SCREAMING_SNAKE_CASE_ = self.get_image_processor() SCREAMING_SNAKE_CASE_ = self.get_tokenizer() SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) SCREAMING_SNAKE_CASE_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] SCREAMING_SNAKE_CASE_ = processor.batch_decode(__magic_name__ ) SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) def __A ( self : List[str] ) -> int: SCREAMING_SNAKE_CASE_ = self.get_image_processor() SCREAMING_SNAKE_CASE_ = self.get_tokenizer() SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) SCREAMING_SNAKE_CASE_ = "lower newer" SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ = processor(text=__magic_name__ , images=__magic_name__ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
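# The dummy-image helper used in the tests above, in two lines: a random HWC uint8
# array converted straight to a PIL image (equivalent to the CHW + moveaxis dance).
import numpy as np
from PIL import Image

img = Image.fromarray(np.random.randint(255, size=(30, 400, 3), dtype=np.uint8))
assert img.size == (400, 30)  # PIL reports (width, height)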
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: A : Tuple = None A : Any = logging.get_logger(__name__) A : Optional[int] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} A : str = { "vocab_file": { "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model", "google/bigbird-roberta-large": ( "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model" ), "google/bigbird-base-trivia-itc": ( "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model" ), }, "tokenizer_file": { "google/bigbird-roberta-base": ( "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json" ), "google/bigbird-roberta-large": ( "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json" ), "google/bigbird-base-trivia-itc": ( "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json" ), }, } A : int = { "google/bigbird-roberta-base": 40_96, "google/bigbird-roberta-large": 40_96, "google/bigbird-base-trivia-itc": 40_96, } A : Optional[Any] = "▁" class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = BigBirdTokenizer lowerCamelCase__ = ['''input_ids''', '''attention_mask'''] lowerCamelCase__ = [] def __init__( self : Optional[int] , __magic_name__ : Optional[Any]=None , __magic_name__ : Optional[Any]=None , __magic_name__ : List[Any]="<unk>" , __magic_name__ : Tuple="<s>" , __magic_name__ : Union[str, Any]="</s>" , __magic_name__ : Dict="<pad>" , __magic_name__ : str="[SEP]" , __magic_name__ : Dict="[MASK]" , __magic_name__ : int="[CLS]" , **__magic_name__ : Optional[int] , ) -> str: SCREAMING_SNAKE_CASE_ = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else bos_token SCREAMING_SNAKE_CASE_ = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else eos_token SCREAMING_SNAKE_CASE_ = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else unk_token SCREAMING_SNAKE_CASE_ = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else pad_token SCREAMING_SNAKE_CASE_ = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else cls_token SCREAMING_SNAKE_CASE_ = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it SCREAMING_SNAKE_CASE_ = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token super().__init__( __magic_name__ , tokenizer_file=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , **__magic_name__ , ) SCREAMING_SNAKE_CASE_ = vocab_file SCREAMING_SNAKE_CASE_ = False if not self.vocab_file else True def __A ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]: SCREAMING_SNAKE_CASE_ = [self.sep_token_id] SCREAMING_SNAKE_CASE_ = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __A ( self : str , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None , __magic_name__ : bool = False ) -> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(__magic_name__ )) + [1] return [1] + ([0] * len(__magic_name__ )) + [1] + ([0] * len(__magic_name__ )) + [1] def __A ( self : Optional[Any] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]: SCREAMING_SNAKE_CASE_ = [self.sep_token_id] SCREAMING_SNAKE_CASE_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __A ( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(__magic_name__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE_ = os.path.join( __magic_name__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ): copyfile(self.vocab_file , __magic_name__ ) return (out_vocab_file,)
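# A tiny sketch of the token_type_ids rule above: segment A plus its [CLS]/[SEP]
# maps to 0, segment B plus its trailing [SEP] maps to 1. The helper name is ours.
def token_type_ids_sketch(len_a, len_b=None):
    first = [0] * (len_a + 2)  # [CLS] A [SEP]
    return first if len_b is None else first + [1] * (len_b + 1)  # B [SEP]

assert token_type_ids_sketch(3) == [0] * 5
assert token_type_ids_sketch(2, 2) == [0, 0, 0, 0, 1, 1, 1]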
from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available from ...utils import OptionalDependencyNotAvailable A : List[Any] = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Optional[Any] = ["DPTFeatureExtractor"] A : str = ["DPTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Optional[Any] = [ "DPT_PRETRAINED_MODEL_ARCHIVE_LIST", "DPTForDepthEstimation", "DPTForSemanticSegmentation", "DPTModel", "DPTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_dpt import DPTFeatureExtractor from .image_processing_dpt import DPTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dpt import ( DPT_PRETRAINED_MODEL_ARCHIVE_LIST, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, DPTPreTrainedModel, ) else: import sys A : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
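# What the _LazyModule wiring above buys: importing the package is cheap, and heavy
# backends (torch, vision) are only pulled in when an attribute is first accessed.
# A minimal sketch, assuming a transformers install that ships DPT:
if __name__ == "__main__":
    from transformers.models.dpt import DPTConfig  # resolved lazily on attribute access

    print(DPTConfig().model_type)  # "dpt"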
# flake8: noqa # Lint as: python3 from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter A : int = logging.get_logger(__name__) A : Dict[Optional[str], Type[Formatter]] = {} A : Dict[Optional[str], str] = {} A : Dict[Optional[str], Exception] = {} def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , ): SCREAMING_SNAKE_CASE_ = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( F'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' ) SCREAMING_SNAKE_CASE_ = formatter_cls for alias in set(aliases + [format_type] ): if alias in _FORMAT_TYPES_ALIASES: logger.warning( F'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' ) SCREAMING_SNAKE_CASE_ = format_type def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None ): SCREAMING_SNAKE_CASE_ = aliases if aliases is not None else [] for alias in set(aliases + [format_type] ): SCREAMING_SNAKE_CASE_ = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=["python"]) _register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"]) _register_formatter(NumpyFormatter, "numpy", aliases=["np"]) _register_formatter(PandasFormatter, "pandas", aliases=["pd"]) _register_formatter(CustomFormatter, "custom") if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"]) else: A : str = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.") _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"]) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, "tensorflow", aliases=["tf"]) else: A : int = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.") _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"]) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, "jax", aliases=[]) else: A : List[Any] = ValueError("JAX needs to be installed to be able to return JAX arrays.") _register_unavailable_formatter(_jax_error, "jax", aliases=[]) def a__ ( __UpperCamelCase ): if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def a__ ( __UpperCamelCase , **__UpperCamelCase ): SCREAMING_SNAKE_CASE_ = get_format_type_from_alias(__UpperCamelCase ) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**__UpperCamelCase ) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( F'''Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'''' )
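# Usage sketch for the registry above; the last two helpers are named
# get_format_type_from_alias and get_formatter upstream in `datasets`
# (an assumption here, since identifiers are renamed in this listing).
if __name__ == "__main__":
    from datasets.formatting import get_formatter

    fmt = get_formatter("np")  # alias "np" resolves to "numpy" -> NumpyFormatter
    print(type(fmt).__name__)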
from __future__ import annotations import collections import pprint from pathlib import Path def a__ ( __UpperCamelCase ): return "".join(sorted(__UpperCamelCase ) ) def a__ ( __UpperCamelCase ): return word_by_signature[signature(__UpperCamelCase )] A : str = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8") A : int = sorted({word.strip().lower() for word in data.splitlines()}) A : Tuple = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": A : Union[str, Any] = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open("anagrams.txt", "w") as file: file.write("all_anagrams = \n ") file.write(pprint.pformat(all_anagrams))
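# The same sorted-letter signature trick on an in-memory list, with no words.txt
# dependency: words sharing a signature are anagrams of one another.
if __name__ == "__main__":
    from collections import defaultdict

    demo_words = ["listen", "silent", "enlist", "google"]
    by_sig = defaultdict(list)
    for w in demo_words:
        by_sig["".join(sorted(w))].append(w)
    print([grp for grp in by_sig.values() if len(grp) > 1])  # [['listen', 'silent', 'enlist']]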
import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) A : str = logging.getLogger(__name__) @dataclass(frozen=SCREAMING_SNAKE_CASE__ ) class lowerCamelCase : """simple docstring""" lowerCamelCase__ = 4_2 lowerCamelCase__ = 4_2 lowerCamelCase__ = None lowerCamelCase__ = None lowerCamelCase__ = None @dataclass(frozen=SCREAMING_SNAKE_CASE__ ) class lowerCamelCase : """simple docstring""" lowerCamelCase__ = 4_2 lowerCamelCase__ = None lowerCamelCase__ = None lowerCamelCase__ = None lowerCamelCase__ = None if is_torch_available(): import torch from torch.utils.data import Dataset class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCamelCase__ = 4_2 def __init__( self : Dict , __magic_name__ : str , __magic_name__ : PreTrainedTokenizer , __magic_name__ : str , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[int]=False , __magic_name__ : bool = False , ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ = hans_processors[task]() SCREAMING_SNAKE_CASE_ = os.path.join( __magic_name__ , "cached_{}_{}_{}_{}".format( "dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(__magic_name__ ) , __magic_name__ , ) , ) SCREAMING_SNAKE_CASE_ = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = label_list[2], label_list[1] SCREAMING_SNAKE_CASE_ = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
SCREAMING_SNAKE_CASE_ = cached_features_file + ".lock" with FileLock(__magic_name__ ): if os.path.exists(__magic_name__ ) and not overwrite_cache: logger.info(F'''Loading features from cached file {cached_features_file}''' ) SCREAMING_SNAKE_CASE_ = torch.load(__magic_name__ ) else: logger.info(F'''Creating features from dataset file at {data_dir}''' ) SCREAMING_SNAKE_CASE_ = ( processor.get_dev_examples(__magic_name__ ) if evaluate else processor.get_train_examples(__magic_name__ ) ) logger.info("Training examples: %s" , len(__magic_name__ ) ) SCREAMING_SNAKE_CASE_ = hans_convert_examples_to_features(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) logger.info("Saving features into cached file %s" , __magic_name__ ) torch.save(self.features , __magic_name__ ) def __len__( self : List[Any] ) -> Dict: """simple docstring""" return len(self.features ) def __getitem__( self : str , __magic_name__ : Any ) -> InputFeatures: """simple docstring""" return self.features[i] def __A ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" return self.label_list if is_tf_available(): import tensorflow as tf class lowerCamelCase : """simple docstring""" lowerCamelCase__ = 4_2 def __init__( self : List[str] , __magic_name__ : str , __magic_name__ : PreTrainedTokenizer , __magic_name__ : str , __magic_name__ : Optional[int] = 128 , __magic_name__ : Union[str, Any]=False , __magic_name__ : bool = False , ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ = hans_processors[task]() SCREAMING_SNAKE_CASE_ = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = label_list[2], label_list[1] SCREAMING_SNAKE_CASE_ = label_list SCREAMING_SNAKE_CASE_ = processor.get_dev_examples(__magic_name__ ) if evaluate else processor.get_train_examples(__magic_name__ ) SCREAMING_SNAKE_CASE_ = hans_convert_examples_to_features(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ): if ex_index % 10_000 == 0: logger.info("Writing example %d of %d" % (ex_index, len(__magic_name__ )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) SCREAMING_SNAKE_CASE_ = tf.data.Dataset.from_generator( __magic_name__ , ( { "example_id": tf.intaa, "input_ids": tf.intaa, "attention_mask": tf.intaa, "token_type_ids": tf.intaa, }, tf.intaa, ) , ( { "example_id": tf.TensorShape([] ), "input_ids": tf.TensorShape([None, None] ), "attention_mask": tf.TensorShape([None, None] ), "token_type_ids": tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def __A ( self : Union[str, Any] ) -> Tuple: """simple docstring""" return self.dataset def __len__( self : List[Any] ) -> Union[str, Any]: """simple docstring""" return len(self.features ) def __getitem__( self : int , __magic_name__ : List[str] ) -> InputFeatures: """simple docstring""" return self.features[i] def __A ( self : Tuple ) -> Union[str, Any]: """simple docstring""" return self.label_list class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __A ( self : int , __magic_name__ : str ) -> Union[str, Any]: """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(__magic_name__ , 
"heuristics_train_set.txt" ) ) , "train" ) def __A ( self : Optional[int] , __magic_name__ : int ) -> List[Any]: """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(__magic_name__ , "heuristics_evaluation_set.txt" ) ) , "dev" ) def __A ( self : Optional[int] ) -> Optional[int]: """simple docstring""" return ["contradiction", "entailment", "neutral"] def __A ( self : List[str] , __magic_name__ : str , __magic_name__ : List[Any] ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE_ = [] for i, line in enumerate(__magic_name__ ): if i == 0: continue SCREAMING_SNAKE_CASE_ = "%s-%s" % (set_type, line[0]) SCREAMING_SNAKE_CASE_ = line[5] SCREAMING_SNAKE_CASE_ = line[6] SCREAMING_SNAKE_CASE_ = line[7][2:] if line[7].startswith("ex" ) else line[7] SCREAMING_SNAKE_CASE_ = line[0] examples.append(InputExample(guid=__magic_name__ , text_a=__magic_name__ , text_b=__magic_name__ , label=__magic_name__ , pairID=__magic_name__ ) ) return examples def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ): SCREAMING_SNAKE_CASE_ = {label: i for i, label in enumerate(__UpperCamelCase )} SCREAMING_SNAKE_CASE_ = [] for ex_index, example in tqdm.tqdm(enumerate(__UpperCamelCase ) , desc="convert examples to features" ): if ex_index % 1_0_0_0_0 == 0: logger.info("Writing example %d" % (ex_index) ) SCREAMING_SNAKE_CASE_ = tokenizer( example.text_a , example.text_b , add_special_tokens=__UpperCamelCase , max_length=__UpperCamelCase , padding="max_length" , truncation=__UpperCamelCase , return_overflowing_tokens=__UpperCamelCase , ) SCREAMING_SNAKE_CASE_ = label_map[example.label] if example.label in label_map else 0 SCREAMING_SNAKE_CASE_ = int(example.pairID ) features.append(InputFeatures(**__UpperCamelCase , label=__UpperCamelCase , pairID=__UpperCamelCase ) ) for i, example in enumerate(examples[:5] ): logger.info("*** Example ***" ) logger.info(F'''guid: {example}''' ) logger.info(F'''features: {features[i]}''' ) return features A : Union[str, Any] = { "hans": 3, } A : Dict = { "hans": HansProcessor, }
import copy import os from typing import TYPE_CHECKING, List, Union if TYPE_CHECKING: pass from ...configuration_utils import PretrainedConfig from ...utils import logging A : int = logging.get_logger(__name__) A : str = { "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json", } class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCamelCase__ = '''align_text_model''' def __init__( self : Optional[Any] , __magic_name__ : Union[str, Any]=30_522 , __magic_name__ : Tuple=768 , __magic_name__ : List[str]=12 , __magic_name__ : Optional[Any]=12 , __magic_name__ : str=3_072 , __magic_name__ : Dict="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : List[str]=512 , __magic_name__ : Any=2 , __magic_name__ : Optional[Any]=0.02 , __magic_name__ : int=1e-12 , __magic_name__ : str=0 , __magic_name__ : Optional[Any]="absolute" , __magic_name__ : Optional[Any]=True , **__magic_name__ : Tuple , ) -> Union[str, Any]: super().__init__(**__magic_name__ ) SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = hidden_dropout_prob SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = type_vocab_size SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = layer_norm_eps SCREAMING_SNAKE_CASE_ = position_embedding_type SCREAMING_SNAKE_CASE_ = use_cache SCREAMING_SNAKE_CASE_ = pad_token_id @classmethod def __A ( cls : Any , __magic_name__ : Union[str, os.PathLike] , **__magic_name__ : Optional[Any] ) -> "PretrainedConfig": cls._set_token_in_kwargs(__magic_name__ ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = cls.get_config_dict(__magic_name__ , **__magic_name__ ) # get the text config dict if we are loading from AlignConfig if config_dict.get("model_type" ) == "align": SCREAMING_SNAKE_CASE_ = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__magic_name__ , **__magic_name__ ) class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCamelCase__ = '''align_vision_model''' def __init__( self : List[str] , __magic_name__ : int = 3 , __magic_name__ : int = 600 , __magic_name__ : float = 2.0 , __magic_name__ : float = 3.1 , __magic_name__ : int = 8 , __magic_name__ : List[int] = [3, 3, 5, 3, 5, 5, 3] , __magic_name__ : List[int] = [32, 16, 24, 40, 80, 112, 192] , __magic_name__ : List[int] = [16, 24, 40, 80, 112, 192, 320] , __magic_name__ : List[int] = [] , __magic_name__ : List[int] = [1, 2, 2, 2, 1, 2, 1] , __magic_name__ : List[int] = [1, 2, 2, 3, 3, 4, 1] , __magic_name__ : List[int] = [1, 6, 6, 6, 6, 6, 6] , __magic_name__ : float = 0.25 , __magic_name__ : str = "swish" , __magic_name__ : int = 2_560 , __magic_name__ : str = "mean" , __magic_name__ : float = 0.02 , __magic_name__ : float = 0.001 , __magic_name__ : float = 0.99 , __magic_name__ : float = 0.2 , **__magic_name__ : List[Any] , ) -> Tuple: super().__init__(**__magic_name__ ) SCREAMING_SNAKE_CASE_ = num_channels SCREAMING_SNAKE_CASE_ = image_size SCREAMING_SNAKE_CASE_ = width_coefficient SCREAMING_SNAKE_CASE_ = depth_coefficient SCREAMING_SNAKE_CASE_ = depth_divisor SCREAMING_SNAKE_CASE_ = kernel_sizes SCREAMING_SNAKE_CASE_ = in_channels SCREAMING_SNAKE_CASE_ = out_channels SCREAMING_SNAKE_CASE_ = depthwise_padding SCREAMING_SNAKE_CASE_ = strides SCREAMING_SNAKE_CASE_ = num_block_repeats SCREAMING_SNAKE_CASE_ = expand_ratios SCREAMING_SNAKE_CASE_ = squeeze_expansion_ratio SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = hidden_dim SCREAMING_SNAKE_CASE_ = pooling_type SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = batch_norm_eps SCREAMING_SNAKE_CASE_ = batch_norm_momentum SCREAMING_SNAKE_CASE_ = drop_connect_rate SCREAMING_SNAKE_CASE_ = sum(__magic_name__ ) * 4 @classmethod def __A ( cls : List[str] , __magic_name__ : Union[str, os.PathLike] , **__magic_name__ : Dict ) -> "PretrainedConfig": cls._set_token_in_kwargs(__magic_name__ ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = cls.get_config_dict(__magic_name__ , **__magic_name__ ) # get the vision config dict if we are loading from AlignConfig if config_dict.get("model_type" ) == "align": SCREAMING_SNAKE_CASE_ = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__magic_name__ , **__magic_name__ ) class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCamelCase__ = '''align''' lowerCamelCase__ = True def __init__( self : Optional[Any] , __magic_name__ : Dict=None , __magic_name__ : List[Any]=None , __magic_name__ : str=640 , __magic_name__ : Any=1.0 , __magic_name__ : Dict=0.02 , **__magic_name__ : Union[str, Any] , ) -> int: super().__init__(**__magic_name__ ) if text_config is None: SCREAMING_SNAKE_CASE_ = {} logger.info("text_config is None. Initializing the AlignTextConfig with default values." ) if vision_config is None: SCREAMING_SNAKE_CASE_ = {} logger.info("vision_config is None. Initializing the AlignVisionConfig with default values." 
) SCREAMING_SNAKE_CASE_ = AlignTextConfig(**__magic_name__ ) SCREAMING_SNAKE_CASE_ = AlignVisionConfig(**__magic_name__ ) SCREAMING_SNAKE_CASE_ = projection_dim SCREAMING_SNAKE_CASE_ = temperature_init_value SCREAMING_SNAKE_CASE_ = initializer_range @classmethod def __A ( cls : List[str] , __magic_name__ : AlignTextConfig , __magic_name__ : AlignVisionConfig , **__magic_name__ : Tuple ) -> Any: return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__magic_name__ ) def __A ( self : int ) -> Union[str, Any]: SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE_ = self.text_config.to_dict() SCREAMING_SNAKE_CASE_ = self.vision_config.to_dict() SCREAMING_SNAKE_CASE_ = self.__class__.model_type return output
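# Composing the two sub-configs above into the joint config; a sketch using the
# upstream exported names AlignTextConfig / AlignVisionConfig / AlignConfig:
if __name__ == "__main__":
    from transformers import AlignConfig, AlignTextConfig, AlignVisionConfig

    cfg = AlignConfig.from_text_vision_configs(AlignTextConfig(), AlignVisionConfig())
    print(cfg.text_config.hidden_size, cfg.vision_config.hidden_dim)  # defaults: 768 2560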
"""simple docstring""" import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class lowerCamelCase (unittest.TestCase ): """simple docstring""" @property def __A ( self : Optional[int] ) -> Optional[Any]: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , ) return model def __A ( self : Any ) -> Optional[Any]: SCREAMING_SNAKE_CASE_ = self.dummy_uncond_unet SCREAMING_SNAKE_CASE_ = PNDMScheduler() SCREAMING_SNAKE_CASE_ = PNDMPipeline(unet=__magic_name__ , scheduler=__magic_name__ ) pndm.to(__magic_name__ ) pndm.set_progress_bar_config(disable=__magic_name__ ) SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ = pndm(generator=__magic_name__ , num_inference_steps=20 , output_type="numpy" ).images SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ = pndm(generator=__magic_name__ , num_inference_steps=20 , output_type="numpy" , return_dict=__magic_name__ )[0] SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE_ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE_ = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class lowerCamelCase (unittest.TestCase ): """simple docstring""" def __A ( self : Union[str, Any] ) -> int: SCREAMING_SNAKE_CASE_ = "google/ddpm-cifar10-32" SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained(__magic_name__ ) SCREAMING_SNAKE_CASE_ = PNDMScheduler() SCREAMING_SNAKE_CASE_ = PNDMPipeline(unet=__magic_name__ , scheduler=__magic_name__ ) pndm.to(__magic_name__ ) pndm.set_progress_bar_config(disable=__magic_name__ ) SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ = pndm(generator=__magic_name__ , output_type="numpy" ).images SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE_ = np.array([0.1564, 0.1_4645, 0.1406, 0.1_4715, 0.1_2425, 0.1_4045, 0.1_3115, 0.1_2175, 0.125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def a__ ( __UpperCamelCase ): return x + 2 class lowerCamelCase (unittest.TestCase ): """simple docstring""" def __A ( self : List[Any] ) -> int: SCREAMING_SNAKE_CASE_ = "x = 3" SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result == 3 self.assertDictEqual(__magic_name__ , {"x": 3} ) SCREAMING_SNAKE_CASE_ = "x = y" SCREAMING_SNAKE_CASE_ = {"y": 5} SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__magic_name__ , {"x": 5, "y": 5} ) def __A ( self : Union[str, Any] ) -> str: SCREAMING_SNAKE_CASE_ = "y = add_two(x)" SCREAMING_SNAKE_CASE_ = {"x": 3} SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ ) assert result == 5 self.assertDictEqual(__magic_name__ , {"x": 3, "y": 5} ) # Won't work without the tool with CaptureStdout() as out: SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result is None assert "tried to execute add_two" in out.out def __A ( self : List[str] ) -> int: SCREAMING_SNAKE_CASE_ = "x = 3" SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result == 3 self.assertDictEqual(__magic_name__ , {"x": 3} ) def __A ( self : Optional[Any] ) -> str: SCREAMING_SNAKE_CASE_ = "test_dict = {'x': x, 'y': add_two(x)}" SCREAMING_SNAKE_CASE_ = {"x": 3} SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ ) self.assertDictEqual(__magic_name__ , {"x": 3, "y": 5} ) self.assertDictEqual(__magic_name__ , {"x": 3, "test_dict": {"x": 3, "y": 5}} ) def __A ( self : Optional[int] ) -> List[str]: SCREAMING_SNAKE_CASE_ = "x = 3\ny = 5" SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__magic_name__ , {"x": 3, "y": 5} ) def __A ( self : Any ) -> List[str]: SCREAMING_SNAKE_CASE_ = "text = f'This is x: {x}.'" SCREAMING_SNAKE_CASE_ = {"x": 3} SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(__magic_name__ , {"x": 3, "text": "This is x: 3."} ) def __A ( self : int ) -> Tuple: SCREAMING_SNAKE_CASE_ = "if x <= 3:\n y = 2\nelse:\n y = 5" SCREAMING_SNAKE_CASE_ = {"x": 3} SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(__magic_name__ , {"x": 3, "y": 2} ) SCREAMING_SNAKE_CASE_ = {"x": 8} SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(__magic_name__ , {"x": 8, "y": 5} ) def __A ( self : str ) -> str: SCREAMING_SNAKE_CASE_ = "test_list = [x, add_two(x)]" SCREAMING_SNAKE_CASE_ = {"x": 3} SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ ) self.assertListEqual(__magic_name__ , [3, 5] ) self.assertDictEqual(__magic_name__ , {"x": 3, "test_list": [3, 5]} ) def __A ( self : Union[str, Any] ) -> List[Any]: SCREAMING_SNAKE_CASE_ = "y = x" SCREAMING_SNAKE_CASE_ = {"x": 3} SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result == 3 self.assertDictEqual(__magic_name__ , {"x": 3, "y": 3} ) def __A ( self : Tuple ) -> List[Any]: SCREAMING_SNAKE_CASE_ = "test_list = [x, add_two(x)]\ntest_list[1]" SCREAMING_SNAKE_CASE_ = {"x": 3} SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ ) assert result == 5 self.assertDictEqual(__magic_name__ , {"x": 3, "test_list": [3, 5]} ) SCREAMING_SNAKE_CASE_ = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']" SCREAMING_SNAKE_CASE_ = {"x": 3} SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ ) assert result == 5 self.assertDictEqual(__magic_name__ , {"x": 3, "test_dict": {"x": 3, "y": 5}} ) def __A ( self : Tuple ) -> Any: SCREAMING_SNAKE_CASE_ = "x = 0\nfor i in range(3):\n x = i" SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"range": range} , state=__magic_name__ ) assert result == 2 self.assertDictEqual(__magic_name__ , {"x": 2, "i": 2} )
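# The contract the tests above pin down, in one call: `evaluate` interprets a
# restricted snippet against a whitelist of tools plus a mutable state dict and
# returns the value of the last statement. Assumes a transformers release that
# ships transformers.tools.
if __name__ == "__main__":
    from transformers.tools.python_interpreter import evaluate

    state = {"x": 3}
    result = evaluate("y = add_two(x)\ny", {"add_two": lambda v: v + 2}, state=state)
    print(result, state)  # 5 {'x': 3, 'y': 5}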
class lowerCamelCase : """simple docstring""" def __init__( self : Dict ) -> List[str]: SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = {} def __A ( self : Tuple , __magic_name__ : int ) -> str: if vertex not in self.adjacency: SCREAMING_SNAKE_CASE_ = {} self.num_vertices += 1 def __A ( self : str , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] ) -> List[str]: self.add_vertex(__magic_name__ ) self.add_vertex(__magic_name__ ) if head == tail: return SCREAMING_SNAKE_CASE_ = weight SCREAMING_SNAKE_CASE_ = weight def __A ( self : int ) -> List[Any]: SCREAMING_SNAKE_CASE_ = self.get_edges() for edge in edges: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = edge edges.remove((tail, head, weight) ) for i in range(len(__magic_name__ ) ): SCREAMING_SNAKE_CASE_ = list(edges[i] ) edges.sort(key=lambda __magic_name__ : e[2] ) for i in range(len(__magic_name__ ) - 1 ): if edges[i][2] >= edges[i + 1][2]: SCREAMING_SNAKE_CASE_ = edges[i][2] + 1 for edge in edges: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = edge SCREAMING_SNAKE_CASE_ = weight SCREAMING_SNAKE_CASE_ = weight def __str__( self : Optional[Any] ) -> List[str]: SCREAMING_SNAKE_CASE_ = "" for tail in self.adjacency: for head in self.adjacency[tail]: SCREAMING_SNAKE_CASE_ = self.adjacency[head][tail] string += F'''{head} -> {tail} == {weight}\n''' return string.rstrip("\n" ) def __A ( self : List[Any] ) -> Dict: SCREAMING_SNAKE_CASE_ = [] for tail in self.adjacency: for head in self.adjacency[tail]: output.append((tail, head, self.adjacency[head][tail]) ) return output def __A ( self : Union[str, Any] ) -> Optional[int]: return self.adjacency.keys() @staticmethod def __A ( __magic_name__ : List[Any]=None , __magic_name__ : Tuple=None ) -> Any: SCREAMING_SNAKE_CASE_ = Graph() if vertices is None: SCREAMING_SNAKE_CASE_ = [] if edges is None: SCREAMING_SNAKE_CASE_ = [] for vertex in vertices: g.add_vertex(__magic_name__ ) for edge in edges: g.add_edge(*__magic_name__ ) return g class lowerCamelCase : """simple docstring""" def __init__( self : Optional[int] ) -> List[Any]: SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = {} def __len__( self : Optional[int] ) -> Optional[int]: return len(self.parent ) def __A ( self : str , __magic_name__ : int ) -> Optional[int]: if item in self.parent: return self.find(__magic_name__ ) SCREAMING_SNAKE_CASE_ = item SCREAMING_SNAKE_CASE_ = 0 return item def __A ( self : Any , __magic_name__ : List[str] ) -> Any: if item not in self.parent: return self.make_set(__magic_name__ ) if item != self.parent[item]: SCREAMING_SNAKE_CASE_ = self.find(self.parent[item] ) return self.parent[item] def __A ( self : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[str] ) -> List[str]: SCREAMING_SNAKE_CASE_ = self.find(__magic_name__ ) SCREAMING_SNAKE_CASE_ = self.find(__magic_name__ ) if roota == roota: return roota if self.rank[roota] > self.rank[roota]: SCREAMING_SNAKE_CASE_ = roota return roota if self.rank[roota] < self.rank[roota]: SCREAMING_SNAKE_CASE_ = roota return roota if self.rank[roota] == self.rank[roota]: self.rank[roota] += 1 SCREAMING_SNAKE_CASE_ = roota return roota return None @staticmethod def __A ( __magic_name__ : Optional[int] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE_ = graph.num_vertices SCREAMING_SNAKE_CASE_ = Graph.UnionFind() SCREAMING_SNAKE_CASE_ = [] while num_components > 1: SCREAMING_SNAKE_CASE_ = {} for vertex in graph.get_vertices(): SCREAMING_SNAKE_CASE_ = 
-1 SCREAMING_SNAKE_CASE_ = graph.get_edges() for edge in edges: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = edge edges.remove((tail, head, weight) ) for edge in edges: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = edge SCREAMING_SNAKE_CASE_ = union_find.find(__magic_name__ ) SCREAMING_SNAKE_CASE_ = union_find.find(__magic_name__ ) if seta != seta: if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: SCREAMING_SNAKE_CASE_ = [head, tail, weight] if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: SCREAMING_SNAKE_CASE_ = [head, tail, weight] for vertex in cheap_edge: if cheap_edge[vertex] != -1: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = cheap_edge[vertex] if union_find.find(__magic_name__ ) != union_find.find(__magic_name__ ): union_find.union(__magic_name__ , __magic_name__ ) mst_edges.append(cheap_edge[vertex] ) SCREAMING_SNAKE_CASE_ = num_components - 1 SCREAMING_SNAKE_CASE_ = Graph.build(edges=__magic_name__ ) return mst
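# End-to-end run under the upstream names from TheAlgorithms'
# minimum_spanning_tree_boruvka.py (Graph.build / Graph.boruvka_mst); those
# names are assumptions here, since the identifiers above are placeholder-renamed.
if __name__ == "__main__":
    g = Graph.build(vertices=[1, 2, 3], edges=[[1, 2, 1], [2, 3, 2], [1, 3, 3]])
    print(Graph.boruvka_mst(g))  # keeps the two cheapest edges, (1, 2) and (2, 3)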
import numpy as np import pandas as pd from sklearn.preprocessing import Normalizer from sklearn.svm import SVR from statsmodels.tsa.statespace.sarimax import SARIMAX def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): SCREAMING_SNAKE_CASE_ = np.array([[1, item, train_mtch[i]] for i, item in enumerate(__UpperCamelCase )] ) SCREAMING_SNAKE_CASE_ = np.array(__UpperCamelCase ) SCREAMING_SNAKE_CASE_ = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , __UpperCamelCase ) ) , x.transpose() ) , __UpperCamelCase ) return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2] ) def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): SCREAMING_SNAKE_CASE_ = (1, 2, 1) SCREAMING_SNAKE_CASE_ = (1, 1, 0, 7) SCREAMING_SNAKE_CASE_ = SARIMAX( __UpperCamelCase , exog=__UpperCamelCase , order=__UpperCamelCase , seasonal_order=__UpperCamelCase ) SCREAMING_SNAKE_CASE_ = model.fit(disp=__UpperCamelCase , maxiter=6_0_0 , method="nm" ) SCREAMING_SNAKE_CASE_ = model_fit.predict(1 , len(__UpperCamelCase ) , exog=[test_match] ) return result[0] def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): SCREAMING_SNAKE_CASE_ = SVR(kernel="rbf" , C=1 , gamma=0.1 , epsilon=0.1 ) regressor.fit(__UpperCamelCase , __UpperCamelCase ) SCREAMING_SNAKE_CASE_ = regressor.predict(__UpperCamelCase ) return y_pred[0] def a__ ( __UpperCamelCase ): train_user.sort() SCREAMING_SNAKE_CASE_ = np.percentile(__UpperCamelCase , 2_5 ) SCREAMING_SNAKE_CASE_ = np.percentile(__UpperCamelCase , 7_5 ) SCREAMING_SNAKE_CASE_ = qa - qa SCREAMING_SNAKE_CASE_ = qa - (iqr * 0.1) return low_lim def a__ ( __UpperCamelCase , __UpperCamelCase ): SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = 0 for i in list_vote: if i > actual_result: SCREAMING_SNAKE_CASE_ = not_safe + 1 else: if abs(abs(__UpperCamelCase ) - abs(__UpperCamelCase ) ) <= 0.1: safe += 1 else: not_safe += 1 return safe > not_safe if __name__ == "__main__": # data_input_df = pd.read_csv("ex_data.csv", header=None) A : Dict = [[1_82_31, 0.0, 1], [2_26_21, 1.0, 2], [1_56_75, 0.0, 3], [2_35_83, 1.0, 4]] A : Optional[Any] = pd.DataFrame( data_input, columns=["total_user", "total_even", "days"] ) A : Union[str, Any] = Normalizer().fit_transform(data_input_df.values) # split data A : Optional[int] = normalize_df[:, 2].tolist() A : List[str] = normalize_df[:, 0].tolist() A : int = normalize_df[:, 1].tolist() # for svr (input variable = total date and total match) A : int = normalize_df[:, [1, 2]].tolist() A : Tuple = x[: len(x) - 1] A : str = x[len(x) - 1 :] # for linear regression & sarimax A : Tuple = total_date[: len(total_date) - 1] A : Optional[int] = total_user[: len(total_user) - 1] A : str = total_match[: len(total_match) - 1] A : List[Any] = total_date[len(total_date) - 1 :] A : List[Any] = total_user[len(total_user) - 1 :] A : Optional[Any] = total_match[len(total_match) - 1 :] # voting system with forecasting A : Optional[int] = [ linear_regression_prediction( trn_date, trn_user, trn_match, tst_date, tst_match ), sarimax_predictor(trn_user, trn_match, tst_match), support_vector_regressor(x_train, x_test, trn_user), ] # check the safety of today's data A : str = "" if data_safety_checker(res_vote, tst_user) else "not " print(f"Today's data is {not_str}safe.")
from collections import deque class lowerCamelCase : """simple docstring""" def __init__( self : str , __magic_name__ : str , __magic_name__ : int , __magic_name__ : int ) -> None: SCREAMING_SNAKE_CASE_ = process_name # process name SCREAMING_SNAKE_CASE_ = arrival_time # arrival time of the process # completion time of finished process or last interrupted time SCREAMING_SNAKE_CASE_ = arrival_time SCREAMING_SNAKE_CASE_ = burst_time # remaining burst time SCREAMING_SNAKE_CASE_ = 0 # total time of the process wait in ready queue SCREAMING_SNAKE_CASE_ = 0 # time from arrival time to completion time class lowerCamelCase : """simple docstring""" def __init__( self : Tuple , __magic_name__ : int , __magic_name__ : list[int] , __magic_name__ : deque[Process] , __magic_name__ : int , ) -> None: # total number of mlfq's queues SCREAMING_SNAKE_CASE_ = number_of_queues # time slice of queues that round robin algorithm applied SCREAMING_SNAKE_CASE_ = time_slices # unfinished process is in this ready_queue SCREAMING_SNAKE_CASE_ = queue # current time SCREAMING_SNAKE_CASE_ = current_time # finished process is in this sequence queue SCREAMING_SNAKE_CASE_ = deque() def __A ( self : Dict ) -> list[str]: SCREAMING_SNAKE_CASE_ = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def __A ( self : List[str] , __magic_name__ : list[Process] ) -> list[int]: SCREAMING_SNAKE_CASE_ = [] for i in range(len(__magic_name__ ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def __A ( self : List[str] , __magic_name__ : list[Process] ) -> list[int]: SCREAMING_SNAKE_CASE_ = [] for i in range(len(__magic_name__ ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def __A ( self : Tuple , __magic_name__ : list[Process] ) -> list[int]: SCREAMING_SNAKE_CASE_ = [] for i in range(len(__magic_name__ ) ): completion_times.append(queue[i].stop_time ) return completion_times def __A ( self : str , __magic_name__ : deque[Process] ) -> list[int]: return [q.burst_time for q in queue] def __A ( self : Optional[Any] , __magic_name__ : Process ) -> int: process.waiting_time += self.current_time - process.stop_time return process.waiting_time def __A ( self : Optional[Any] , __magic_name__ : deque[Process] ) -> deque[Process]: SCREAMING_SNAKE_CASE_ = deque() # sequence deque of finished process while len(__magic_name__ ) != 0: SCREAMING_SNAKE_CASE_ = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(__magic_name__ ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 SCREAMING_SNAKE_CASE_ = 0 # set the process's turnaround time because it is finished SCREAMING_SNAKE_CASE_ = self.current_time - cp.arrival_time # set the completion time SCREAMING_SNAKE_CASE_ = self.current_time # add the process to queue that has finished queue finished.append(__magic_name__ ) self.finish_queue.extend(__magic_name__ ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def __A ( self : Any , __magic_name__ : deque[Process] , __magic_name__ : int ) -> tuple[deque[Process], deque[Process]]: SCREAMING_SNAKE_CASE_ = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in 
range(len(__magic_name__ ) ): SCREAMING_SNAKE_CASE_ = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(__magic_name__ ) # if the burst time of process is bigger than time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time SCREAMING_SNAKE_CASE_ = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(__magic_name__ ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished SCREAMING_SNAKE_CASE_ = 0 # set the finish time SCREAMING_SNAKE_CASE_ = self.current_time # update the process' turnaround time because it is finished SCREAMING_SNAKE_CASE_ = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(__magic_name__ ) self.finish_queue.extend(__magic_name__ ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def __A ( self : Any ) -> deque[Process]: # all queues except last one have round_robin algorithm for i in range(self.number_of_queues - 1 ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest A : Dict = Process("P1", 0, 53) A : str = Process("P2", 0, 17) A : List[Any] = Process("P3", 0, 68) A : List[str] = Process("P4", 0, 24) A : Dict = 3 A : Any = [17, 25] A : Dict = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])}) A : Union[str, Any] = Process("P1", 0, 53) A : Any = Process("P2", 0, 17) A : Dict = Process("P3", 0, 68) A : List[str] = Process("P4", 0, 24) A : Optional[int] = 3 A : int = [17, 25] A : Union[str, Any] = deque([Pa, Pa, Pa, Pa]) A : Tuple = MLFQ(number_of_queues, time_slices, queue, 0) A : Tuple = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( f"waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}" ) # print completion times of processes(P1, P2, P3, P4) print( f"completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}" ) # print total turnaround times of processes(P1, P2, P3, P4) print( f"turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}" ) # print sequence of finished processes print( f"sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}" )
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available A : List[str] = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Any = [ "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST", "SwinForImageClassification", "SwinForMaskedImageModeling", "SwinModel", "SwinPreTrainedModel", "SwinBackbone", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : str = [ "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST", "TFSwinForImageClassification", "TFSwinForMaskedImageModeling", "TFSwinModel", "TFSwinPreTrainedModel", ] if TYPE_CHECKING: from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swin import ( SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, SwinModel, SwinPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_swin import ( TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, TFSwinForImageClassification, TFSwinForMaskedImageModeling, TFSwinModel, TFSwinPreTrainedModel, ) else: import sys A : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaInpaintPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" lowerCamelCase__ = KandinskyVaaInpaintPipeline lowerCamelCase__ = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image'''] lowerCamelCase__ = [ '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''', ] lowerCamelCase__ = [ '''generator''', '''height''', '''width''', '''latents''', '''guidance_scale''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] lowerCamelCase__ = False @property def __A ( self : Any ) -> List[str]: return 32 @property def __A ( self : Union[str, Any] ) -> Optional[Any]: return 32 @property def __A ( self : int ) -> int: return self.time_input_dim @property def __A ( self : str ) -> str: return self.time_input_dim * 4 @property def __A ( self : Union[str, Any] ) -> Any: return 100 @property def __A ( self : List[str] ) -> str: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ = { "in_channels": 9, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } SCREAMING_SNAKE_CASE_ = UNetaDConditionModel(**__magic_name__ ) return model @property def __A ( self : List[str] ) -> List[Any]: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __A ( self : List[str] ) -> List[Any]: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ = VQModel(**self.dummy_movq_kwargs ) return model def __A ( self : Dict ) -> List[Any]: SCREAMING_SNAKE_CASE_ = self.dummy_unet SCREAMING_SNAKE_CASE_ = self.dummy_movq SCREAMING_SNAKE_CASE_ = DDIMScheduler( num_train_timesteps=1_000 , beta_schedule="linear" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=__magic_name__ , set_alpha_to_one=__magic_name__ , steps_offset=1 , prediction_type="epsilon" , thresholding=__magic_name__ , ) SCREAMING_SNAKE_CASE_ = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __A ( self : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : Tuple=0 ) -> Dict: SCREAMING_SNAKE_CASE_ = floats_tensor((1, self.text_embedder_hidden_size) , 
rng=random.Random(__magic_name__ ) ).to(__magic_name__ ) SCREAMING_SNAKE_CASE_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __magic_name__ ) # create init_image SCREAMING_SNAKE_CASE_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ ) SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE_ = Image.fromarray(np.uinta(__magic_name__ ) ).convert("RGB" ).resize((256, 256) ) # create mask SCREAMING_SNAKE_CASE_ = np.ones((64, 64) , dtype=np.floataa ) SCREAMING_SNAKE_CASE_ = 0 if str(__magic_name__ ).startswith("mps" ): SCREAMING_SNAKE_CASE_ = torch.manual_seed(__magic_name__ ) else: SCREAMING_SNAKE_CASE_ = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ ) SCREAMING_SNAKE_CASE_ = { "image": init_image, "mask_image": mask, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 2, "guidance_scale": 4.0, "output_type": "np", } return inputs def __A ( self : Optional[Any] ) -> Dict: SCREAMING_SNAKE_CASE_ = "cpu" SCREAMING_SNAKE_CASE_ = self.get_dummy_components() SCREAMING_SNAKE_CASE_ = self.pipeline_class(**__magic_name__ ) SCREAMING_SNAKE_CASE_ = pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) SCREAMING_SNAKE_CASE_ = pipe(**self.get_dummy_inputs(__magic_name__ ) ) SCREAMING_SNAKE_CASE_ = output.images SCREAMING_SNAKE_CASE_ = pipe( **self.get_dummy_inputs(__magic_name__ ) , return_dict=__magic_name__ , )[0] SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE_ = image_from_tuple[0, -3:, -3:, -1] print(F'''image.shape {image.shape}''' ) assert image.shape == (1, 64, 64, 3) SCREAMING_SNAKE_CASE_ = np.array( [0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' def __A ( self : int ) -> List[str]: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class lowerCamelCase (unittest.TestCase ): """simple docstring""" def __A ( self : List[Any] ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self : List[Any] ) -> str: SCREAMING_SNAKE_CASE_ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy" ) SCREAMING_SNAKE_CASE_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) SCREAMING_SNAKE_CASE_ = np.ones((768, 768) , dtype=np.floataa ) SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = "a hat" SCREAMING_SNAKE_CASE_ = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa ) pipe_prior.to(__magic_name__ ) SCREAMING_SNAKE_CASE_ = KandinskyVaaInpaintPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder-inpaint" , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE_ = pipeline.to(__magic_name__ ) pipeline.set_progress_bar_config(disable=__magic_name__ ) SCREAMING_SNAKE_CASE_ = torch.Generator(device="cpu" ).manual_seed(0 ) 
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = pipe_prior( __magic_name__ , generator=__magic_name__ , num_inference_steps=5 , negative_prompt="" , ).to_tuple() SCREAMING_SNAKE_CASE_ = pipeline( image=__magic_name__ , mask_image=__magic_name__ , image_embeds=__magic_name__ , negative_image_embeds=__magic_name__ , generator=__magic_name__ , num_inference_steps=100 , height=768 , width=768 , output_type="np" , ) SCREAMING_SNAKE_CASE_ = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__magic_name__ , __magic_name__ )
import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel A : Union[str, Any] = "0.12" # assumed parallelism: 8 @require_flax @is_staging_test class lowerCamelCase (unittest.TestCase ): """simple docstring""" @classmethod def __A ( cls : Any ) -> Dict: SCREAMING_SNAKE_CASE_ = TOKEN HfFolder.save_token(__magic_name__ ) @classmethod def __A ( cls : Optional[int] ) -> Tuple: try: delete_repo(token=cls._token , repo_id="test-model-flax" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-model-flax-org" ) except HTTPError: pass def __A ( self : str ) -> str: SCREAMING_SNAKE_CASE_ = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) SCREAMING_SNAKE_CASE_ = FlaxBertModel(__magic_name__ ) model.push_to_hub("test-model-flax" , use_auth_token=self._token ) SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' ) SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(model.params ) ) SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): SCREAMING_SNAKE_CASE_ = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__magic_name__ , 1e-3 , msg=F'''{key} not identical''' ) # Reset repo delete_repo(token=self._token , repo_id="test-model-flax" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(__magic_name__ , repo_id="test-model-flax" , push_to_hub=__magic_name__ , use_auth_token=self._token ) SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' ) SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(model.params ) ) SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): SCREAMING_SNAKE_CASE_ = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__magic_name__ , 1e-3 , msg=F'''{key} not identical''' ) def __A ( self : int ) -> Tuple: SCREAMING_SNAKE_CASE_ = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) SCREAMING_SNAKE_CASE_ = FlaxBertModel(__magic_name__ ) model.push_to_hub("valid_org/test-model-flax-org" , use_auth_token=self._token ) SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" ) SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(model.params ) ) SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): SCREAMING_SNAKE_CASE_ = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__magic_name__ , 1e-3 , msg=F'''{key} not identical''' ) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-model-flax-org" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( __magic_name__ , repo_id="valid_org/test-model-flax-org" , push_to_hub=__magic_name__ , use_auth_token=self._token ) SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" ) SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(model.params ) ) SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(new_model.params ) ) for key 
in base_params.keys(): SCREAMING_SNAKE_CASE_ = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__magic_name__ , 1e-3 , msg=F'''{key} not identical''' ) def a__ ( __UpperCamelCase , __UpperCamelCase ): SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = flatten_dict(modela.params ) SCREAMING_SNAKE_CASE_ = flatten_dict(modela.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4: SCREAMING_SNAKE_CASE_ = False return models_are_equal @require_flax class lowerCamelCase (unittest.TestCase ): """simple docstring""" def __A ( self : str ) -> Dict: SCREAMING_SNAKE_CASE_ = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" ) SCREAMING_SNAKE_CASE_ = FlaxBertModel(__magic_name__ ) SCREAMING_SNAKE_CASE_ = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(__magic_name__ , __magic_name__ ) ) with self.assertRaises(__magic_name__ ): SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ ) SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ ) self.assertTrue(check_models_equal(__magic_name__ , __magic_name__ ) ) def __A ( self : Optional[Any] ) -> Tuple: SCREAMING_SNAKE_CASE_ = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" ) SCREAMING_SNAKE_CASE_ = FlaxBertModel(__magic_name__ ) SCREAMING_SNAKE_CASE_ = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(__magic_name__ , __magic_name__ ) , max_shard_size="10KB" ) with self.assertRaises(__magic_name__ ): SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ ) SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ ) self.assertTrue(check_models_equal(__magic_name__ , __magic_name__ ) ) def __A ( self : Optional[int] ) -> Dict: SCREAMING_SNAKE_CASE_ = "bert" SCREAMING_SNAKE_CASE_ = "hf-internal-testing/tiny-random-bert-subfolder" with self.assertRaises(__magic_name__ ): SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ ) SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def __A ( self : List[str] ) -> Dict: SCREAMING_SNAKE_CASE_ = "bert" SCREAMING_SNAKE_CASE_ = "hf-internal-testing/tiny-random-bert-sharded-subfolder" with self.assertRaises(__magic_name__ ): SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ ) SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ ) self.assertIsNotNone(__magic_name__ )
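# The parameter-comparison idiom used throughout the tests above, in isolation
# (assumes flax is installed): flatten_dict turns a nested param tree into
# {path-tuple: leaf} so corresponding leaves can be diffed key by key.
if __name__ == "__main__":
    from flax.traverse_util import flatten_dict

    params = {"dense": {"kernel": 1.0, "bias": 0.0}}
    print(flatten_dict(params))  # {('dense', 'kernel'): 1.0, ('dense', 'bias'): 0.0}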
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast @require_vision class lowerCamelCase (unittest.TestCase ): """simple docstring""" def __A ( self : int ) -> Any: SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp() SCREAMING_SNAKE_CASE_ = BlipImageProcessor() SCREAMING_SNAKE_CASE_ = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" ) SCREAMING_SNAKE_CASE_ = BlipaProcessor(__magic_name__ , __magic_name__ ) processor.save_pretrained(self.tmpdirname ) def __A ( self : str , **__magic_name__ : int ) -> Union[str, Any]: return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).tokenizer def __A ( self : Dict , **__magic_name__ : List[Any] ) -> int: return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).image_processor def __A ( self : int ) -> Any: shutil.rmtree(self.tmpdirname ) def __A ( self : Dict ) -> Dict: SCREAMING_SNAKE_CASE_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] SCREAMING_SNAKE_CASE_ = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __A ( self : List[Any] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) SCREAMING_SNAKE_CASE_ = self.get_image_processor(do_normalize=__magic_name__ , padding_value=1.0 ) SCREAMING_SNAKE_CASE_ = BlipaProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__magic_name__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __magic_name__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __magic_name__ ) def __A ( self : Tuple ) -> int: SCREAMING_SNAKE_CASE_ = self.get_image_processor() SCREAMING_SNAKE_CASE_ = self.get_tokenizer() SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ = image_processor(__magic_name__ , return_tensors="np" ) SCREAMING_SNAKE_CASE_ = processor(images=__magic_name__ , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __A ( self : str ) -> Tuple: SCREAMING_SNAKE_CASE_ = self.get_image_processor() SCREAMING_SNAKE_CASE_ = self.get_tokenizer() SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) SCREAMING_SNAKE_CASE_ = "lower newer" SCREAMING_SNAKE_CASE_ = processor(text=__magic_name__ ) SCREAMING_SNAKE_CASE_ = tokenizer(__magic_name__ , return_token_type_ids=__magic_name__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self : Dict ) -> Union[str, Any]: SCREAMING_SNAKE_CASE_ = self.get_image_processor() SCREAMING_SNAKE_CASE_ = self.get_tokenizer() SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , 
image_processor=__magic_name__ ) SCREAMING_SNAKE_CASE_ = "lower newer" SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ = processor(text=__magic_name__ , images=__magic_name__ ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) # test if it raises when no input is passed with pytest.raises(__magic_name__ ): processor() def __A ( self : Dict ) -> Tuple: SCREAMING_SNAKE_CASE_ = self.get_image_processor() SCREAMING_SNAKE_CASE_ = self.get_tokenizer() SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) SCREAMING_SNAKE_CASE_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] SCREAMING_SNAKE_CASE_ = processor.batch_decode(__magic_name__ ) SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) def __A ( self : List[str] ) -> int: SCREAMING_SNAKE_CASE_ = self.get_image_processor() SCREAMING_SNAKE_CASE_ = self.get_tokenizer() SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) SCREAMING_SNAKE_CASE_ = "lower newer" SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ = processor(text=__magic_name__ , images=__magic_name__ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
366
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.utils import ComputeEnvironment

from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file  # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment  # noqa: F401
from .sagemaker import get_sagemaker_input


A : Union[str, Any] = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def a__ ( ):
    SCREAMING_SNAKE_CASE_ = _ask_options(
        "In which compute environment are you running?" ,
        ["This machine", "AWS (Amazon SageMaker)"] ,
        _convert_compute_environment ,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        SCREAMING_SNAKE_CASE_ = get_sagemaker_input()
    else:
        SCREAMING_SNAKE_CASE_ = get_cluster_input()
    return config


def a__ ( __UpperCamelCase=None ):
    if subparsers is not None:
        SCREAMING_SNAKE_CASE_ = subparsers.add_parser("config" , description=__UpperCamelCase )
    else:
        SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser("Accelerate config command" , description=__UpperCamelCase )

    parser.add_argument(
        "--config_file" ,
        default=__UpperCamelCase ,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ) ,
    )

    if subparsers is not None:
        parser.set_defaults(func=__UpperCamelCase )
    return parser


def a__ ( __UpperCamelCase ):
    SCREAMING_SNAKE_CASE_ = get_user_input()
    if args.config_file is not None:
        SCREAMING_SNAKE_CASE_ = args.config_file
    else:
        if not os.path.isdir(__UpperCamelCase ):
            os.makedirs(__UpperCamelCase )
        SCREAMING_SNAKE_CASE_ = default_yaml_config_file

    if config_file.endswith(".json" ):
        config.to_json_file(__UpperCamelCase )
    else:
        config.to_yaml_file(__UpperCamelCase )
    print(F'''accelerate configuration saved at {config_file}''' )


def a__ ( ):
    SCREAMING_SNAKE_CASE_ = config_command_parser()
    SCREAMING_SNAKE_CASE_ = parser.parse_args()
    config_command(__UpperCamelCase )


if __name__ == "__main__":
    main()
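# A minimal sketch (our names, not this dataset's) of the argparse subparser
# pattern the file above uses: the same builder serves both as a standalone
# parser and as a subcommand attached to a parent CLI.
import argparse

def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config")       # attach as `tool config`
    else:
        parser = argparse.ArgumentParser("config")     # standalone parser
    parser.add_argument("--config_file", default=None)
    if subparsers is not None:
        parser.set_defaults(func=lambda args: print(args))  # dispatch hook
    return parser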
305
0
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
    """simple docstring"""

    lowerCamelCase__ = ['''image_processor''', '''tokenizer''']
    lowerCamelCase__ = '''OwlViTImageProcessor'''
    lowerCamelCase__ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')

    def __init__( self : Any , __magic_name__ : Union[str, Any]=None , __magic_name__ : Any=None , **__magic_name__ : List[Any] ) -> Optional[Any]:
        SCREAMING_SNAKE_CASE_ = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." ,
                __magic_name__ ,
            )
            SCREAMING_SNAKE_CASE_ = kwargs.pop("feature_extractor" )

        SCREAMING_SNAKE_CASE_ = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )

        super().__init__(__magic_name__ , __magic_name__ )

    def __call__( self : str , __magic_name__ : List[Any]=None , __magic_name__ : Optional[int]=None , __magic_name__ : int=None , __magic_name__ : List[str]="max_length" , __magic_name__ : Optional[Any]="np" , **__magic_name__ : Optional[int] ) -> int:
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none." )

        if text is not None:
            if isinstance(__magic_name__ , __magic_name__ ) or (isinstance(__magic_name__ , __magic_name__ ) and not isinstance(text[0] , __magic_name__ )):
                SCREAMING_SNAKE_CASE_ = [self.tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )]

            elif isinstance(__magic_name__ , __magic_name__ ) and isinstance(text[0] , __magic_name__ ):
                SCREAMING_SNAKE_CASE_ = []

                # Maximum number of queries across batch
                SCREAMING_SNAKE_CASE_ = max([len(__magic_name__ ) for t in text] )

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(__magic_name__ ) != max_num_queries:
                        SCREAMING_SNAKE_CASE_ = t + [" "] * (max_num_queries - len(__magic_name__ ))

                    SCREAMING_SNAKE_CASE_ = self.tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
                    encodings.append(__magic_name__ )
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )

            if return_tensors == "np":
                SCREAMING_SNAKE_CASE_ = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                SCREAMING_SNAKE_CASE_ = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                SCREAMING_SNAKE_CASE_ = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                SCREAMING_SNAKE_CASE_ = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )

            elif return_tensors == "pt" and is_torch_available():
                import torch

                SCREAMING_SNAKE_CASE_ = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
                SCREAMING_SNAKE_CASE_ = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                SCREAMING_SNAKE_CASE_ = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
                SCREAMING_SNAKE_CASE_ = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )

            else:
                raise ValueError("Target return tensor type could not be returned" )

            SCREAMING_SNAKE_CASE_ = BatchEncoding()
            SCREAMING_SNAKE_CASE_ = input_ids
            SCREAMING_SNAKE_CASE_ = attention_mask

        if query_images is not None:
            SCREAMING_SNAKE_CASE_ = BatchEncoding()
            SCREAMING_SNAKE_CASE_ = self.image_processor(
                __magic_name__ , return_tensors=__magic_name__ , **__magic_name__ ).pixel_values
            SCREAMING_SNAKE_CASE_ = query_pixel_values

        if images is not None:
            SCREAMING_SNAKE_CASE_ = self.image_processor(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )

        if text is not None and images is not None:
            SCREAMING_SNAKE_CASE_ = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            SCREAMING_SNAKE_CASE_ = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**__magic_name__ ) , tensor_type=__magic_name__ )

    def __A ( self : Union[str, Any] , *__magic_name__ : Union[str, Any] , **__magic_name__ : Union[str, Any] ) -> Optional[Any]:
        return self.image_processor.post_process(*__magic_name__ , **__magic_name__ )

    def __A ( self : List[Any] , *__magic_name__ : str , **__magic_name__ : Dict ) -> str:
        return self.image_processor.post_process_object_detection(*__magic_name__ , **__magic_name__ )

    def __A ( self : int , *__magic_name__ : Any , **__magic_name__ : Tuple ) -> List[Any]:
        return self.image_processor.post_process_image_guided_detection(*__magic_name__ , **__magic_name__ )

    def __A ( self : Dict , *__magic_name__ : List[Any] , **__magic_name__ : Tuple ) -> Dict:
        return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )

    def __A ( self : Union[str, Any] , *__magic_name__ : Optional[Any] , **__magic_name__ : List[str] ) -> Dict:
        return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )

    @property
    def __A ( self : Optional[int] ) -> Union[str, Any]:
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." ,
            __magic_name__ ,
        )
        return self.image_processor_class

    @property
    def __A ( self : Any ) -> List[str]:
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." ,
            __magic_name__ ,
        )
        return self.image_processor
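# Minimal sketch of the query-padding step above (our variable names): every
# sample in a batch of nested text queries is padded with " " to the longest
# query list before tokenization, so the per-sample encodings concatenate
# into rectangular input_ids / attention_mask tensors.
texts = [["a photo of a cat"], ["a photo of a dog", "a photo of a bird"]]
max_num_queries = max(len(t) for t in texts)       # 2
padded = [t + [" "] * (max_num_queries - len(t)) for t in texts]
assert all(len(t) == max_num_queries for t in padded)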
367
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=SCREAMING_SNAKE_CASE__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
    """simple docstring"""

    lowerCamelCase__ = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    lowerCamelCase__ = Features({'''text''': Value('''string''' )} )
    lowerCamelCase__ = Features({'''summary''': Value('''string''' )} )
    lowerCamelCase__ = "text"
    lowerCamelCase__ = "summary"

    @property
    def __A ( self : Dict ) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
305
0
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = "x" , __UpperCamelCase = 1_0**-1_0 , __UpperCamelCase = 1 , ):
    SCREAMING_SNAKE_CASE_ = symbols(__UpperCamelCase )
    SCREAMING_SNAKE_CASE_ = lambdify(__UpperCamelCase , __UpperCamelCase )
    SCREAMING_SNAKE_CASE_ = lambdify(__UpperCamelCase , diff(__UpperCamelCase , __UpperCamelCase ) )
    SCREAMING_SNAKE_CASE_ = starting_point
    while True:
        if diff_function(__UpperCamelCase ) != 0:
            SCREAMING_SNAKE_CASE_ = prev_guess - multiplicity * func(__UpperCamelCase ) / diff_function(
                __UpperCamelCase )
        else:
            raise ZeroDivisionError("Could not find root" ) from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess ) < precision:
            return next_guess

        SCREAMING_SNAKE_CASE_ = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    # Find fourth Root of 5
    print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}")
    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"{newton_raphson('log(y) - 1', 2, variable='y')}",
    )
    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"{newton_raphson('exp(x) - 1', 10, precision=0.0_05)}",
    )
    # Find root of cos(x)
    print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
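# A worked sketch (our names, independent of the obfuscated code above) of why
# the `multiplicity` factor matters: at a root of multiplicity m, plain Newton
# converges only linearly, while x - m*f(x)/f'(x) restores fast convergence.
def modified_newton(f, df, x, m=1, tol=1e-10, max_iter=100):
    for _ in range(max_iter):
        d = df(x)
        if d == 0:
            return x  # landed on (or numerically on top of) the root
        step = m * f(x) / d
        x -= step
        if abs(step) < tol:
            return x
    return x

# (x - 1)**2 has a double root at 1: with m=2 the iteration reaches it in one
# step from x0=3, while m=1 needs dozens of linearly-converging steps.
root = modified_newton(lambda x: (x - 1) ** 2, lambda x: 2 * (x - 1), 3.0, m=2)
assert abs(root - 1.0) < 1e-9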
368
from ....utils import logging


A : List[str] = logging.get_logger(__name__)


class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
    """simple docstring"""

    def __init__( self : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Any=None , __magic_name__ : List[str]=2_048 ) -> List[Any]:
        SCREAMING_SNAKE_CASE_ = config.__dict__
        SCREAMING_SNAKE_CASE_ = modal_hidden_size
        if num_labels:
            SCREAMING_SNAKE_CASE_ = num_labels
305
0
import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


class lowerCamelCase :
    """simple docstring"""

    lowerCamelCase__ = None

    def __A ( self : Dict ) -> List[Any]:
        SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
        SCREAMING_SNAKE_CASE_ = json.loads(feat_extract.to_json_string() )
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key] , __magic_name__ )

    def __A ( self : Dict ) -> Dict:
        SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            SCREAMING_SNAKE_CASE_ = os.path.join(__magic_name__ , "feat_extract.json" )
            feat_extract_first.to_json_file(__magic_name__ )
            SCREAMING_SNAKE_CASE_ = self.feature_extraction_class.from_json_file(__magic_name__ )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )

    def __A ( self : Optional[Any] ) -> str:
        SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            SCREAMING_SNAKE_CASE_ = feat_extract_first.save_pretrained(__magic_name__ )[0]
            check_json_file_has_correct_format(__magic_name__ )
            SCREAMING_SNAKE_CASE_ = self.feature_extraction_class.from_pretrained(__magic_name__ )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )

    def __A ( self : Dict ) -> List[Any]:
        SCREAMING_SNAKE_CASE_ = self.feature_extraction_class()
        self.assertIsNotNone(__magic_name__ )
369
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCamelCase__ = ['''image_processor''', '''tokenizer'''] lowerCamelCase__ = '''ViltImageProcessor''' lowerCamelCase__ = ('''BertTokenizer''', '''BertTokenizerFast''') def __init__( self : Optional[int] , __magic_name__ : str=None , __magic_name__ : List[str]=None , **__magic_name__ : Any ) -> str: SCREAMING_SNAKE_CASE_ = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __magic_name__ , ) SCREAMING_SNAKE_CASE_ = kwargs.pop("feature_extractor" ) SCREAMING_SNAKE_CASE_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(__magic_name__ , __magic_name__ ) SCREAMING_SNAKE_CASE_ = self.image_processor def __call__( self : List[str] , __magic_name__ : List[str] , __magic_name__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __magic_name__ : bool = True , __magic_name__ : Union[bool, str, PaddingStrategy] = False , __magic_name__ : Union[bool, str, TruncationStrategy] = None , __magic_name__ : Optional[int] = None , __magic_name__ : int = 0 , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = True , __magic_name__ : Optional[Union[str, TensorType]] = None , **__magic_name__ : str , ) -> BatchEncoding: SCREAMING_SNAKE_CASE_ = self.tokenizer( text=__magic_name__ , add_special_tokens=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , max_length=__magic_name__ , stride=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_token_type_ids=__magic_name__ , return_attention_mask=__magic_name__ , return_overflowing_tokens=__magic_name__ , return_special_tokens_mask=__magic_name__ , return_offsets_mapping=__magic_name__ , return_length=__magic_name__ , verbose=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , ) # add pixel_values + pixel_mask SCREAMING_SNAKE_CASE_ = self.image_processor(__magic_name__ , return_tensors=__magic_name__ ) encoding.update(__magic_name__ ) return encoding def __A ( self : Optional[int] , *__magic_name__ : List[Any] , **__magic_name__ : Optional[Any] ) -> Any: return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ ) def __A ( self : Dict , *__magic_name__ : List[Any] , **__magic_name__ : Union[str, Any] ) -> str: return self.tokenizer.decode(*__magic_name__ , **__magic_name__ ) @property def __A ( self : Optional[int] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE_ = self.tokenizer.model_input_names SCREAMING_SNAKE_CASE_ = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def __A ( self : Dict ) -> List[Any]: warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." 
, __magic_name__ , ) return self.image_processor_class @property def __A ( self : int ) -> List[Any]: warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __magic_name__ , ) return self.image_processor
305
0
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints

from transformers import (
    AutoTokenizer,
    PixaStructConfig,
    PixaStructForConditionalGeneration,
    PixaStructImageProcessor,
    PixaStructProcessor,
    PixaStructTextConfig,
    PixaStructVisionConfig,
)


def a__ ( __UpperCamelCase ):
    SCREAMING_SNAKE_CASE_ = checkpoints.load_tax_checkpoint(__UpperCamelCase )
    SCREAMING_SNAKE_CASE_ = flatten_dict(__UpperCamelCase )
    return flax_params


def a__ ( __UpperCamelCase ):
    SCREAMING_SNAKE_CASE_ = {}
    SCREAMING_SNAKE_CASE_ = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }
    SCREAMING_SNAKE_CASE_ = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            SCREAMING_SNAKE_CASE_ = ".".join(key[1:] )

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                SCREAMING_SNAKE_CASE_ = new_key.replace(__UpperCamelCase , __UpperCamelCase )

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    SCREAMING_SNAKE_CASE_ = new_key.replace(__UpperCamelCase , __UpperCamelCase )

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                SCREAMING_SNAKE_CASE_ = re.sub(r"layers_(\d+)" , r"layer.\1" , __UpperCamelCase )
                SCREAMING_SNAKE_CASE_ = new_key.replace("encoder" , "encoder.encoder" )

            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                SCREAMING_SNAKE_CASE_ = re.sub(r"layers_(\d+)" , r"layer.\1" , __UpperCamelCase )

            SCREAMING_SNAKE_CASE_ = flax_dict[key]

    SCREAMING_SNAKE_CASE_ = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            SCREAMING_SNAKE_CASE_ = torch.from_numpy(converted_dict[key].T )
        else:
            SCREAMING_SNAKE_CASE_ = torch.from_numpy(converted_dict[key] )

    return converted_torch_dict


def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=False ):
    SCREAMING_SNAKE_CASE_ = get_flax_param(__UpperCamelCase )

    if not use_large:
        SCREAMING_SNAKE_CASE_ = PixaStructVisionConfig()
        SCREAMING_SNAKE_CASE_ = PixaStructTextConfig()
    else:
        SCREAMING_SNAKE_CASE_ = PixaStructVisionConfig(
            hidden_size=1_5_3_6 , d_ff=3_9_6_8 , num_attention_heads=2_4 , num_hidden_layers=1_8 )
        SCREAMING_SNAKE_CASE_ = PixaStructTextConfig(hidden_size=1_5_3_6 , d_ff=3_9_6_8 , num_heads=2_4 , num_layers=1_8 )
    SCREAMING_SNAKE_CASE_ = PixaStructConfig(
        vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=__UpperCamelCase )

    SCREAMING_SNAKE_CASE_ = PixaStructForConditionalGeneration(__UpperCamelCase )

    SCREAMING_SNAKE_CASE_ = rename_and_convert_flax_params(__UpperCamelCase )
    model.load_state_dict(__UpperCamelCase )

    SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer" )
    SCREAMING_SNAKE_CASE_ = PixaStructImageProcessor()
    SCREAMING_SNAKE_CASE_ = PixaStructProcessor(image_processor=__UpperCamelCase , tokenizer=__UpperCamelCase )

    if use_large:
        SCREAMING_SNAKE_CASE_ = 4_0_9_6
        SCREAMING_SNAKE_CASE_ = True

    # mkdir if needed
    os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )

    model.save_pretrained(__UpperCamelCase )
    processor.save_pretrained(__UpperCamelCase )

    print("Model saved in {}".format(__UpperCamelCase ) )


if __name__ == "__main__":
    A : Any = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Use large model.")

    A : Dict = parser.parse_args()
    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
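# Hedged sketch (our names) of the Flax-to-PyTorch weight rule the conversion
# above applies: Flax Dense kernels are stored (in_features, out_features),
# while torch nn.Linear weights are (out_features, in_features), so
# non-embedding matrices are transposed; embeddings are copied as-is.
import numpy as np
import torch

flax_kernel = np.zeros((768, 3072), dtype=np.float32)   # (in, out)
torch_weight = torch.from_numpy(flax_kernel.T)          # (out, in)
assert torch_weight.shape == (3072, 768)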
370
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING A : str = logging.get_logger(__name__) A : Optional[int] = { "microsoft/table-transformer-detection": ( "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json" ), } class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCamelCase__ = '''table-transformer''' lowerCamelCase__ = ['''past_key_values'''] lowerCamelCase__ = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self : List[Any] , __magic_name__ : Optional[Any]=True , __magic_name__ : Dict=None , __magic_name__ : Any=3 , __magic_name__ : List[str]=100 , __magic_name__ : Union[str, Any]=6 , __magic_name__ : Dict=2_048 , __magic_name__ : str=8 , __magic_name__ : int=6 , __magic_name__ : List[Any]=2_048 , __magic_name__ : Optional[int]=8 , __magic_name__ : Optional[int]=0.0 , __magic_name__ : List[Any]=0.0 , __magic_name__ : Optional[Any]=True , __magic_name__ : List[Any]="relu" , __magic_name__ : List[str]=256 , __magic_name__ : List[str]=0.1 , __magic_name__ : int=0.0 , __magic_name__ : Optional[Any]=0.0 , __magic_name__ : Tuple=0.02 , __magic_name__ : str=1.0 , __magic_name__ : int=False , __magic_name__ : Dict="sine" , __magic_name__ : Union[str, Any]="resnet50" , __magic_name__ : Optional[Any]=True , __magic_name__ : str=False , __magic_name__ : List[str]=1 , __magic_name__ : int=5 , __magic_name__ : Union[str, Any]=2 , __magic_name__ : Tuple=1 , __magic_name__ : Optional[int]=1 , __magic_name__ : Optional[Any]=5 , __magic_name__ : Optional[int]=2 , __magic_name__ : Union[str, Any]=0.1 , **__magic_name__ : Tuple , ) -> str: if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." 
) SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(__magic_name__ , __magic_name__ ): SCREAMING_SNAKE_CASE_ = backbone_config.get("model_type" ) SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING[backbone_model_type] SCREAMING_SNAKE_CASE_ = config_class.from_dict(__magic_name__ ) # set timm attributes to None SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None, None, None SCREAMING_SNAKE_CASE_ = use_timm_backbone SCREAMING_SNAKE_CASE_ = backbone_config SCREAMING_SNAKE_CASE_ = num_channels SCREAMING_SNAKE_CASE_ = num_queries SCREAMING_SNAKE_CASE_ = d_model SCREAMING_SNAKE_CASE_ = encoder_ffn_dim SCREAMING_SNAKE_CASE_ = encoder_layers SCREAMING_SNAKE_CASE_ = encoder_attention_heads SCREAMING_SNAKE_CASE_ = decoder_ffn_dim SCREAMING_SNAKE_CASE_ = decoder_layers SCREAMING_SNAKE_CASE_ = decoder_attention_heads SCREAMING_SNAKE_CASE_ = dropout SCREAMING_SNAKE_CASE_ = attention_dropout SCREAMING_SNAKE_CASE_ = activation_dropout SCREAMING_SNAKE_CASE_ = activation_function SCREAMING_SNAKE_CASE_ = init_std SCREAMING_SNAKE_CASE_ = init_xavier_std SCREAMING_SNAKE_CASE_ = encoder_layerdrop SCREAMING_SNAKE_CASE_ = decoder_layerdrop SCREAMING_SNAKE_CASE_ = encoder_layers SCREAMING_SNAKE_CASE_ = auxiliary_loss SCREAMING_SNAKE_CASE_ = position_embedding_type SCREAMING_SNAKE_CASE_ = backbone SCREAMING_SNAKE_CASE_ = use_pretrained_backbone SCREAMING_SNAKE_CASE_ = dilation # Hungarian matcher SCREAMING_SNAKE_CASE_ = class_cost SCREAMING_SNAKE_CASE_ = bbox_cost SCREAMING_SNAKE_CASE_ = giou_cost # Loss coefficients SCREAMING_SNAKE_CASE_ = mask_loss_coefficient SCREAMING_SNAKE_CASE_ = dice_loss_coefficient SCREAMING_SNAKE_CASE_ = bbox_loss_coefficient SCREAMING_SNAKE_CASE_ = giou_loss_coefficient SCREAMING_SNAKE_CASE_ = eos_coefficient super().__init__(is_encoder_decoder=__magic_name__ , **__magic_name__ ) @property def __A ( self : Union[str, Any] ) -> int: return self.encoder_attention_heads @property def __A ( self : Any ) -> int: return self.d_model class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCamelCase__ = version.parse('''1.11''' ) @property def __A ( self : Tuple ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def __A ( self : Any ) -> float: return 1e-5 @property def __A ( self : int ) -> int: return 12
305
0
from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging A : Union[str, Any] = logging.get_logger(__name__) A : Dict = { "huggingface/informer-tourism-monthly": ( "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json" ), # See all Informer models at https://huggingface.co/models?filter=informer } class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCamelCase__ = '''informer''' lowerCamelCase__ = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', '''num_hidden_layers''': '''encoder_layers''', } def __init__( self : str , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[int] = None , __magic_name__ : str = "student_t" , __magic_name__ : str = "nll" , __magic_name__ : int = 1 , __magic_name__ : List[int] = None , __magic_name__ : Optional[Union[str, bool]] = "mean" , __magic_name__ : int = 0 , __magic_name__ : int = 0 , __magic_name__ : int = 0 , __magic_name__ : int = 0 , __magic_name__ : Optional[List[int]] = None , __magic_name__ : Optional[List[int]] = None , __magic_name__ : int = 64 , __magic_name__ : int = 32 , __magic_name__ : int = 32 , __magic_name__ : int = 2 , __magic_name__ : int = 2 , __magic_name__ : int = 2 , __magic_name__ : int = 2 , __magic_name__ : bool = True , __magic_name__ : str = "gelu" , __magic_name__ : float = 0.05 , __magic_name__ : float = 0.1 , __magic_name__ : float = 0.1 , __magic_name__ : float = 0.1 , __magic_name__ : float = 0.1 , __magic_name__ : int = 100 , __magic_name__ : float = 0.02 , __magic_name__ : Optional[int]=True , __magic_name__ : str = "prob" , __magic_name__ : int = 5 , __magic_name__ : bool = True , **__magic_name__ : int , ) -> Any: # time series specific configuration SCREAMING_SNAKE_CASE_ = prediction_length SCREAMING_SNAKE_CASE_ = context_length or prediction_length SCREAMING_SNAKE_CASE_ = distribution_output SCREAMING_SNAKE_CASE_ = loss SCREAMING_SNAKE_CASE_ = input_size SCREAMING_SNAKE_CASE_ = num_time_features SCREAMING_SNAKE_CASE_ = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7] SCREAMING_SNAKE_CASE_ = scaling SCREAMING_SNAKE_CASE_ = num_dynamic_real_features SCREAMING_SNAKE_CASE_ = num_static_real_features SCREAMING_SNAKE_CASE_ = num_static_categorical_features # set cardinality if cardinality and num_static_categorical_features > 0: if len(__magic_name__ ) != num_static_categorical_features: raise ValueError( "The cardinality should be a list of the same length as `num_static_categorical_features`" ) SCREAMING_SNAKE_CASE_ = cardinality else: SCREAMING_SNAKE_CASE_ = [0] # set embedding_dimension if embedding_dimension and num_static_categorical_features > 0: if len(__magic_name__ ) != num_static_categorical_features: raise ValueError( "The embedding dimension should be a list of the same length as `num_static_categorical_features`" ) SCREAMING_SNAKE_CASE_ = embedding_dimension else: SCREAMING_SNAKE_CASE_ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] SCREAMING_SNAKE_CASE_ = num_parallel_samples # Transformer architecture configuration SCREAMING_SNAKE_CASE_ = input_size * len(self.lags_sequence ) + self._number_of_features SCREAMING_SNAKE_CASE_ = d_model SCREAMING_SNAKE_CASE_ = encoder_attention_heads SCREAMING_SNAKE_CASE_ = decoder_attention_heads SCREAMING_SNAKE_CASE_ = encoder_ffn_dim SCREAMING_SNAKE_CASE_ = decoder_ffn_dim SCREAMING_SNAKE_CASE_ = encoder_layers SCREAMING_SNAKE_CASE_ = decoder_layers 
SCREAMING_SNAKE_CASE_ = dropout SCREAMING_SNAKE_CASE_ = attention_dropout SCREAMING_SNAKE_CASE_ = activation_dropout SCREAMING_SNAKE_CASE_ = encoder_layerdrop SCREAMING_SNAKE_CASE_ = decoder_layerdrop SCREAMING_SNAKE_CASE_ = activation_function SCREAMING_SNAKE_CASE_ = init_std SCREAMING_SNAKE_CASE_ = use_cache # Informer SCREAMING_SNAKE_CASE_ = attention_type SCREAMING_SNAKE_CASE_ = sampling_factor SCREAMING_SNAKE_CASE_ = distil super().__init__(is_encoder_decoder=__magic_name__ , **__magic_name__ ) @property def __A ( self : List[Any] ) -> int: return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
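# Worked example (our numbers) of the feature-size arithmetic in the config
# above: with input_size=1, lags [1..7], one categorical embedding of dim 4,
# no dynamic/static reals, and 2 time features, the model consumes
#   1 * 7 + (4 + 0 + 2 + 0 + 1 * 2) = 15
# values per time step (lagged targets plus the extra feature columns, where
# input_size * 2 covers the log1p(abs(loc)) and log(scale) scaling features).
input_size, lags, emb = 1, [1, 2, 3, 4, 5, 6, 7], [4]
num_time_features = 2
number_of_features = sum(emb) + 0 + num_time_features + 0 + input_size * 2
feature_size = input_size * len(lags) + number_of_features
assert feature_size == 15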
371
import warnings

from diffusers import StableDiffusionImgaImgPipeline  # noqa F401


warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
305
0
from dataclasses import dataclass from typing import Tuple import numpy as np import torch @dataclass class lowerCAmelCase : '''simple docstring''' _A : torch.Tensor # [batch_size x 3] _A : torch.Tensor # [batch_size x 3] _A : torch.Tensor # [batch_size x 3] _A : torch.Tensor # [batch_size x 3] _A : int _A : int _A : float _A : float _A : Tuple[int] def lowerCAmelCase ( self : int ) -> int: """simple docstring""" assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0] assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3 assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2 def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) ) def lowerCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) ) def lowerCAmelCase ( self : Dict ) -> torch.Tensor: """simple docstring""" __lowercase : Union[str, Any] = torch.arange(self.height * self.width ) __lowercase : int = torch.stack( [ pixel_indices % self.width, torch.div(__a , self.width , rounding_mode="""trunc""" ), ] , axis=1 , ) return coords @property def lowerCAmelCase ( self : Dict ) -> Optional[int]: """simple docstring""" __lowercase , *__lowercase : Union[str, Any] = self.shape __lowercase : str = int(np.prod(__a ) ) __lowercase : Optional[Any] = self.get_image_coords() __lowercase : Dict = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] ) __lowercase : Dict = self.get_camera_rays(__a ) __lowercase : Union[str, Any] = rays.view(__a , inner_batch_size * self.height * self.width , 2 , 3 ) return rays def lowerCAmelCase ( self : List[str] , __a : torch.Tensor ) -> torch.Tensor: """simple docstring""" __lowercase , *__lowercase , __lowercase : Optional[int] = coords.shape assert n_coords == 2 assert batch_size == self.origin.shape[0] __lowercase : Optional[Any] = coords.view(__a , -1 , 2 ) __lowercase : Dict = self.resolution() __lowercase : Optional[Any] = self.fov() __lowercase : List[str] = (flat.float() / (res - 1)) * 2 - 1 __lowercase : Union[str, Any] = fracs * torch.tan(fov / 2 ) __lowercase : Any = fracs.view(__a , -1 , 2 ) __lowercase : Optional[int] = ( self.z.view(__a , 1 , 3 ) + self.x.view(__a , 1 , 3 ) * fracs[:, :, :1] + self.y.view(__a , 1 , 3 ) * fracs[:, :, 1:] ) __lowercase : Tuple = directions / directions.norm(dim=-1 , keepdim=__a ) __lowercase : Any = torch.stack( [ torch.broadcast_to(self.origin.view(__a , 1 , 3 ) , [batch_size, directions.shape[1], 3] ), directions, ] , dim=2 , ) return rays.view(__a , *__a , 2 , 3 ) def lowerCAmelCase ( self : Optional[int] , __a : int , __a : int ) -> "DifferentiableProjectiveCamera": """simple docstring""" assert width * self.height == height * self.width, "The aspect ratio should not change." 
return DifferentiableProjectiveCamera( origin=self.origin , x=self.x , y=self.y , z=self.z , width=__a , height=__a , x_fov=self.x_fov , y_fov=self.y_fov , ) def snake_case_ ( lowerCAmelCase_ : int ): __lowercase : Optional[int] = [] __lowercase : Dict = [] __lowercase : List[str] = [] __lowercase : Union[str, Any] = [] for theta in np.linspace(0 , 2 * np.pi , num=20 ): __lowercase : Optional[int] = np.array([np.sin(lowerCAmelCase_ ), np.cos(lowerCAmelCase_ ), -0.5] ) z /= np.sqrt(np.sum(z**2 ) ) __lowercase : int = -z * 4 __lowercase : Tuple = np.array([np.cos(lowerCAmelCase_ ), -np.sin(lowerCAmelCase_ ), 0.0] ) __lowercase : str = np.cross(lowerCAmelCase_ , lowerCAmelCase_ ) origins.append(lowerCAmelCase_ ) xs.append(lowerCAmelCase_ ) ys.append(lowerCAmelCase_ ) zs.append(lowerCAmelCase_ ) return DifferentiableProjectiveCamera( origin=torch.from_numpy(np.stack(lowerCAmelCase_ , axis=0 ) ).float() , x=torch.from_numpy(np.stack(lowerCAmelCase_ , axis=0 ) ).float() , y=torch.from_numpy(np.stack(lowerCAmelCase_ , axis=0 ) ).float() , z=torch.from_numpy(np.stack(lowerCAmelCase_ , axis=0 ) ).float() , width=lowerCAmelCase_ , height=lowerCAmelCase_ , x_fov=0.7 , y_fov=0.7 , shape=(1, len(lowerCAmelCase_ )) , )
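# De-obfuscated sketch (our names, a loose paraphrase of the class above) of
# the per-pixel ray direction formula it implements: pixel coordinates are
# mapped to [-1, 1], scaled by tan(fov / 2), combined with the camera basis
# vectors (x, y, z), then normalized to unit length.
import numpy as np

def ray_direction(px, py, width, height, fov, x_axis, y_axis, z_axis):
    fx = (px / (width - 1)) * 2 - 1    # horizontal fraction in [-1, 1]
    fy = (py / (height - 1)) * 2 - 1   # vertical fraction in [-1, 1]
    d = z_axis + x_axis * fx * np.tan(fov / 2) + y_axis * fy * np.tan(fov / 2)
    return d / np.linalg.norm(d)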
306
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


lowerCamelCase : Optional[Any] = {
    '''configuration_poolformer''': [
        '''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''PoolFormerConfig''',
        '''PoolFormerOnnxConfig''',
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase : int = ['''PoolFormerFeatureExtractor''']
    lowerCamelCase : Union[str, Any] = ['''PoolFormerImageProcessor''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase : List[str] = [
        '''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''PoolFormerForImageClassification''',
        '''PoolFormerModel''',
        '''PoolFormerPreTrainedModel''',
    ]


if TYPE_CHECKING:
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )

else:
    import sys

    lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
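# Minimal sketch (our names; transformers' real _LazyModule is richer) of the
# lazy-import pattern above: attribute access triggers the submodule import,
# so `import package` stays cheap until a symbol is actually used.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # symbol -> submodule that defines it
        self._map = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(self, attr):
        module = importlib.import_module(self.__name__ + "." + self._map[attr])
        return getattr(module, attr)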
306
1
import sys from collections import defaultdict class lowerCAmelCase : '''simple docstring''' def __init__( self : int ) -> Dict: """simple docstring""" __lowercase : str = [] def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] ) -> Any: """simple docstring""" return self.node_position[vertex] def lowerCAmelCase ( self : Dict , __a : Tuple , __a : Tuple ) -> int: """simple docstring""" __lowercase : Tuple = pos def lowerCAmelCase ( self : Optional[Any] , __a : Union[str, Any] , __a : Optional[Any] , __a : int , __a : Optional[Any] ) -> List[Any]: """simple docstring""" if start > size // 2 - 1: return else: if 2 * start + 2 >= size: __lowercase : Union[str, Any] = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: __lowercase : Tuple = 2 * start + 1 else: __lowercase : Dict = 2 * start + 2 if heap[smallest_child] < heap[start]: __lowercase , __lowercase : Dict = heap[smallest_child], positions[smallest_child] __lowercase , __lowercase : int = ( heap[start], positions[start], ) __lowercase , __lowercase : Dict = temp, tempa __lowercase : Optional[Any] = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] , self.get_position(positions[start] ) ) self.set_position(positions[start] , __a ) self.top_to_bottom(__a , __a , __a , __a ) def lowerCAmelCase ( self : List[Any] , __a : Optional[int] , __a : List[Any] , __a : int , __a : Union[str, Any] ) -> Optional[int]: """simple docstring""" __lowercase : Union[str, Any] = position[index] while index != 0: __lowercase : Optional[Any] = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: __lowercase : Union[str, Any] = heap[parent] __lowercase : List[str] = position[parent] self.set_position(position[parent] , __a ) else: __lowercase : List[Any] = val __lowercase : Optional[int] = temp self.set_position(__a , __a ) break __lowercase : Optional[Any] = parent else: __lowercase : int = val __lowercase : Dict = temp self.set_position(__a , 0 ) def lowerCAmelCase ( self : str , __a : Optional[int] , __a : Optional[int] ) -> List[Any]: """simple docstring""" __lowercase : int = len(__a ) // 2 - 1 for i in range(__a , -1 , -1 ): self.top_to_bottom(__a , __a , len(__a ) , __a ) def lowerCAmelCase ( self : List[str] , __a : List[str] , __a : int ) -> List[str]: """simple docstring""" __lowercase : int = positions[0] __lowercase : Optional[Any] = sys.maxsize self.top_to_bottom(__a , 0 , len(__a ) , __a ) return temp def snake_case_ ( lowerCAmelCase_ : Union[str, Any] ): __lowercase : int = Heap() __lowercase : Optional[Any] = [0] * len(lowerCAmelCase_ ) __lowercase : List[Any] = [-1] * len(lowerCAmelCase_ ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph __lowercase : List[str] = [] # Heap of Distance of vertices from their neighboring vertex __lowercase : List[str] = [] for vertex in range(len(lowerCAmelCase_ ) ): distance_tv.append(sys.maxsize ) positions.append(lowerCAmelCase_ ) heap.node_position.append(lowerCAmelCase_ ) __lowercase : str = [] __lowercase : Tuple = 1 __lowercase : int = sys.maxsize for neighbor, distance in adjacency_list[0]: __lowercase : int = 0 __lowercase : Dict = distance heap.heapify(lowerCAmelCase_ , lowerCAmelCase_ ) for _ in range(1 , len(lowerCAmelCase_ ) ): __lowercase : List[str] = heap.delete_minimum(lowerCAmelCase_ , lowerCAmelCase_ ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) __lowercase : Optional[Any] = 1 for 
neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(lowerCAmelCase_ )] ): __lowercase : Dict = distance heap.bottom_to_top( lowerCAmelCase_ , heap.get_position(lowerCAmelCase_ ) , lowerCAmelCase_ , lowerCAmelCase_ ) __lowercase : Optional[int] = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > lowerCamelCase : Optional[Any] = int(input('''Enter number of edges: ''').strip()) lowerCamelCase : Tuple = defaultdict(list) for _ in range(edges_number): lowerCamelCase : Optional[Any] = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
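# A compact standard-library alternative (our code, not this dataset's) to the
# hand-rolled heap above: Prim's minimum spanning tree with heapq over an
# adjacency list of (neighbor, weight) pairs.
import heapq

def prim_mst(adj, start=0):
    visited, edges, pq = {start}, [], [(w, start, v) for v, w in adj[start]]
    heapq.heapify(pq)
    while pq and len(visited) < len(adj):
        w, u, v = heapq.heappop(pq)
        if v in visited:
            continue
        visited.add(v)
        edges.append((u, v, w))
        for nxt, wt in adj[v]:
            if nxt not in visited:
                heapq.heappush(pq, (wt, v, nxt))
    return edges

# Example: a 3-vertex triangle; the MST keeps the two lightest edges.
adj = {0: [(1, 1), (2, 3)], 1: [(0, 1), (2, 1)], 2: [(0, 3), (1, 1)]}
assert sum(w for _, _, w in prim_mst(adj)) == 2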
306
from __future__ import annotations


def snake_case_ ( lowerCAmelCase_ : int ):
    __lowercase : List[str] = 2
    __lowercase : Union[str, Any] = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(lowerCAmelCase_ )
    if n > 1:
        factors.append(lowerCAmelCase_ )
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
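# De-obfuscated usage sketch (our names) of the trial-division factorization
# above: divide out each factor i while it divides n; any n > 1 left at the
# end is itself prime, so at most one factor can exceed sqrt(n).
def prime_factors(n: int) -> list[int]:
    i, factors = 2, []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors

assert prime_factors(360) == [2, 2, 2, 3, 3, 5]  # 360 = 2**3 * 3**2 * 5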
306
1
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


lowerCamelCase : List[str] = logging.get_logger(__name__)


class lowerCAmelCase ( __a ):
    '''simple docstring'''

    def __init__( self : str , *__a : Optional[Any] , **__a : List[Any] ) -> None:
        """simple docstring"""
        warnings.warn(
            """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use MobileViTImageProcessor instead.""" ,
            __a ,
        )
        super().__init__(*__a , **__a )
306
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
    '''simple docstring'''

    @slow
    def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
        """simple docstring"""
        __lowercase : Dict = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )

        __lowercase : List[str] = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] ,
            dtype=tf.intaa ,
        )  # J'aime le camembert !"
        __lowercase : Optional[Any] = model(__a )["""last_hidden_state"""]
        __lowercase : Any = tf.TensorShape((1, 10, 768) )
        self.assertEqual(output.shape , __a )
        # compare the actual values for a slice.
        __lowercase : Dict = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] ,
            dtype=tf.floataa ,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
306
1
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCamelCase : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class lowerCAmelCase ( __a , unittest.TestCase ): '''simple docstring''' _A : str = XLMRobertaTokenizer _A : Dict = XLMRobertaTokenizerFast _A : List[Any] = True _A : int = True def lowerCAmelCase ( self : int ) -> List[Any]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __lowercase : Tuple = XLMRobertaTokenizer(__a , keep_accents=__a ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" __lowercase : int = """<pad>""" __lowercase : Optional[Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a ) def lowerCAmelCase ( self : Optional[int] ) -> Optional[int]: """simple docstring""" __lowercase : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(vocab_keys[-1] , """<mask>""" ) self.assertEqual(len(__a ) , 1002 ) def lowerCAmelCase ( self : Dict ) -> str: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1002 ) def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __lowercase : Tuple = XLMRobertaTokenizer(__a , keep_accents=__a ) __lowercase : Optional[int] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(__a , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) __lowercase : Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __a , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) __lowercase : int = tokenizer.convert_tokens_to_ids(__a ) self.assertListEqual( __a , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) __lowercase : Union[str, Any] = tokenizer.convert_ids_to_tokens(__a ) self.assertListEqual( __a , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def lowerCAmelCase ( self : Any ) -> Optional[Any]: """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions 
return __lowercase : Optional[int] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): __lowercase : Optional[int] = self.rust_tokenizer_class.from_pretrained(__a , **__a ) __lowercase : List[str] = self.tokenizer_class.from_pretrained(__a , **__a ) __lowercase : List[str] = tempfile.mkdtemp() __lowercase : Optional[Any] = tokenizer_r.save_pretrained(__a ) __lowercase : int = tokenizer_p.save_pretrained(__a ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) __lowercase : Optional[int] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f ) self.assertSequenceEqual(__a , __a ) # Checks everything loads correctly in the same way __lowercase : str = tokenizer_r.from_pretrained(__a ) __lowercase : Any = tokenizer_p.from_pretrained(__a ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__a , __a ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(__a ) # Save tokenizer rust, legacy_format=True __lowercase : Dict = tempfile.mkdtemp() __lowercase : List[Any] = tokenizer_r.save_pretrained(__a , legacy_format=__a ) __lowercase : Optional[int] = tokenizer_p.save_pretrained(__a ) # Checks it save with the same files self.assertSequenceEqual(__a , __a ) # Checks everything loads correctly in the same way __lowercase : int = tokenizer_r.from_pretrained(__a ) __lowercase : List[Any] = tokenizer_p.from_pretrained(__a ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__a , __a ) ) shutil.rmtree(__a ) # Save tokenizer rust, legacy_format=False __lowercase : Union[str, Any] = tempfile.mkdtemp() __lowercase : str = tokenizer_r.save_pretrained(__a , legacy_format=__a ) __lowercase : Optional[int] = tokenizer_p.save_pretrained(__a ) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way __lowercase : Optional[int] = tokenizer_r.from_pretrained(__a ) __lowercase : Optional[Any] = tokenizer_p.from_pretrained(__a ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__a , __a ) ) shutil.rmtree(__a ) @cached_property def lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""" ) def lowerCAmelCase ( self : int ) -> List[Any]: """simple docstring""" with tempfile.NamedTemporaryFile() as f: shutil.copyfile(__a , f.name ) __lowercase : int = XLMRobertaTokenizer(f.name , keep_accents=__a ) __lowercase : List[str] = pickle.dumps(__a ) pickle.loads(__a ) def lowerCAmelCase ( self : List[str] ) -> List[Any]: """simple docstring""" if not self.test_rust_tokenizer: return __lowercase : Any = self.get_tokenizer() __lowercase : str = self.get_rust_tokenizer() __lowercase : int = """I was born in 92000, and this is falsé.""" __lowercase : Dict = tokenizer.tokenize(__a ) __lowercase : Optional[int] = rust_tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) __lowercase : Optional[Any] = 
tokenizer.encode(__a , add_special_tokens=__a ) __lowercase : Tuple = rust_tokenizer.encode(__a , add_special_tokens=__a ) self.assertListEqual(__a , __a ) __lowercase : int = self.get_rust_tokenizer() __lowercase : Optional[Any] = tokenizer.encode(__a ) __lowercase : Any = rust_tokenizer.encode(__a ) self.assertListEqual(__a , __a ) @slow def lowerCAmelCase ( self : Tuple ) -> List[Any]: """simple docstring""" __lowercase : Union[str, Any] = """Hello World!""" __lowercase : Optional[Any] = [0, 35378, 6661, 38, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(__a , self.big_tokenizer.encode(__a ) ) @slow def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" __lowercase : Any = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) __lowercase : str = [ 0, 3293, 83, 10, 4552, 4989, 7986, 678, 10, 5915, 111, 179459, 124850, 4, 6044, 237, 12, 6, 5, 6, 4, 6780, 705, 15, 1388, 44, 378, 10114, 711, 152, 20, 6, 5, 22376, 642, 1221, 15190, 34153, 450, 5608, 959, 1119, 57702, 136, 186, 47, 1098, 29367, 47, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 6044, 237, 6284, 50901, 528, 31, 90, 34, 927, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(__a , self.big_tokenizer.encode(__a ) ) @slow def lowerCAmelCase ( self : str ) -> Dict: """simple docstring""" __lowercase : int = {"""input_ids""": [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__a , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
306
def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : str ):
    __lowercase : Optional[Any] = len(lowerCAmelCase_ )
    __lowercase : str = len(lowerCAmelCase_ )
    __lowercase : Optional[int] = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    __lowercase : Tuple = True
    for i in range(lowerCAmelCase_ ):
        for j in range(m + 1 ):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    __lowercase : Optional[Any] = True
                if a[i].islower():
                    __lowercase : Dict = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
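# Usage sketch (our names) of the abbreviation DP above: dp[i][j] answers
# "can the first i chars of a be turned into the first j chars of b by
# upper-casing some lowercase letters and deleting the remaining lowercase?"
def can_abbreviate(a: str, b: str) -> bool:
    n, m = len(a), len(b)
    dp = [[False] * (m + 1) for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True      # match (possibly upper-cased)
                if a[i].islower():
                    dp[i + 1][j] = True          # delete the lowercase char
    return dp[n][m]

assert can_abbreviate("daBcd", "ABC") is True    # d(del) a->A B c->C d(del)
assert can_abbreviate("dBcd", "ABC") is False    # no 'a' available to form 'A'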
306
1
import argparse
import json
import os
import re

import torch

from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging


logging.set_verbosity_info()

WEIGHTS_TO_AVERAGE_ENDSWITH = [
    "word_embeddings_layernorm.weight",
    "word_embeddings_layernorm.bias",
    "input_layernorm.weight",
    "input_layernorm.bias",
    "post_attention_layernorm.weight",
    "post_attention_layernorm.bias",
    "self_attention.dense.bias",
    "mlp.dense_4h_to_h.bias",
    "ln_f.weight",
    "ln_f.bias",
]

WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
    "mlp.dense_4h_to_h.weight",
    "self_attention.dense.weight",
]


def layer_name_mapping(key: str, file: str) -> str:
    """Map a Megatron-DeepSpeed weight name to its transformers equivalent."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }
    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks: layer files start at index 3 in the checkpoint
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key


def get_dtype_size(dtype: torch.dtype) -> float:
    """Size in bytes of one element of `dtype` (bools are bit-packed)."""
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8


def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None
        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))

        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bloom_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path to the Megatron-LM checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--bloom_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--shard_model",
        action="store_true",
        help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
    )
    parser.add_argument(
        "--pretraining_tp",
        default=4,
        type=int,
        help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
    )
    args = parser.parse_args()
    convert_bloom_checkpoint_to_pytorch(
        args.bloom_checkpoint_path,
        args.bloom_config_file,
        args.pytorch_dump_folder_path,
        args.shard_model,
        args.pretraining_tp,
    )
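The merge rule inside the loop above is the heart of the conversion: LayerNorm-style weights are replicated across tensor-parallel ranks and therefore summed and averaged, while linear weights are true shards and must be concatenated along their row- or column-parallel dimension. A minimal sketch of that rule on dummy tensors, assuming two TP ranks and hypothetical shapes (illustrative only, not part of the script):

# Sketch: merging two tensor-parallel shards (hypothetical shapes, two TP ranks).
import torch

pretraining_tp = 2
rank_shards = [
    {"h.0.input_layernorm.weight": torch.ones(8), "h.0.self_attention.dense.weight": torch.randn(8, 4)},
    {"h.0.input_layernorm.weight": torch.ones(8), "h.0.self_attention.dense.weight": torch.randn(8, 4)},
]

tensors = dict(rank_shards[0])
for temp in rank_shards[1:]:
    for key in tensors:
        if key.endswith("input_layernorm.weight"):
            tensors[key] = tensors[key] + temp[key]  # replicated: sum now, divide below
        else:
            # "self_attention.dense.weight" is row-parallel, so shards concatenate on dim 1
            tensors[key] = torch.cat([tensors[key], temp[key]], dim=1)

tensors["h.0.input_layernorm.weight"] /= pretraining_tp
print(tensors["h.0.self_attention.dense.weight"].shape)  # torch.Size([8, 8])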
from scipy.stats import spearmanr import datasets lowerCamelCase : List[str] = ''' The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. ''' lowerCamelCase : List[str] = ''' Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {\'spearmanr\': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results[\'spearmanr\']) -0.7 >>> print(round(results[\'spearmanr_pvalue\'], 2)) 0.19 ''' lowerCamelCase : Union[str, Any] = r'''\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase ( datasets.Metric ): '''simple docstring''' def lowerCAmelCase ( self : Tuple ) -> Any: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""float""" ), """references""": datasets.Value("""float""" ), } ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , ) def lowerCAmelCase ( self : List[Any] , __a : str , __a : Any , __a : Optional[int]=False ) -> List[str]: """simple docstring""" __lowercase : Optional[Any] = spearmanr(__a , __a ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
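The metric above is a thin wrapper around `scipy.stats.spearmanr`, so the docstring's first example can be reproduced without the `datasets` machinery; a minimal check:

# Sketch: the metric delegates to scipy, so the docstring example reduces to this.
from scipy.stats import spearmanr

rho, pvalue = spearmanr([10, 9, 2.5, 6, 4], [1, 2, 3, 4, 5])
print(round(rho, 2))     # -0.7
print(round(pvalue, 2))  # 0.19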
import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def snake_case_ ( lowerCAmelCase_ : str ): __lowercase : List[str] = [] embed.append( ( F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight", F"stage{idx}.patch_embed.proj.weight", ) ) embed.append( ( F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias", F"stage{idx}.patch_embed.proj.bias", ) ) embed.append( ( F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight", F"stage{idx}.patch_embed.norm.weight", ) ) embed.append( ( F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias", F"stage{idx}.patch_embed.norm.bias", ) ) return embed def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] ): __lowercase : Optional[Any] = [] attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight", F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight", F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias", F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean", F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var", F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked", F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight", F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight", F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias", F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean", F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var", F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var", ) ) attention_weights.append( ( 
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked", F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight", F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight", F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias", F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean", F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var", F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked", F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight", F"stage{idx}.blocks.{cnt}.attn.proj_q.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias", F"stage{idx}.blocks.{cnt}.attn.proj_q.bias", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight", F"stage{idx}.blocks.{cnt}.attn.proj_k.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias", F"stage{idx}.blocks.{cnt}.attn.proj_k.bias", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight", F"stage{idx}.blocks.{cnt}.attn.proj_v.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias", F"stage{idx}.blocks.{cnt}.attn.proj_v.bias", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight", F"stage{idx}.blocks.{cnt}.attn.proj.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias", F"stage{idx}.blocks.{cnt}.attn.proj.bias", ) ) attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") ) attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") ) attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") ) attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") ) attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") ) 
attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") ) attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") ) attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") ) return attention_weights def snake_case_ ( lowerCAmelCase_ : Union[str, Any] ): __lowercase : str = [] token.append((F"cvt.encoder.stages.{idx}.cls_token", """stage2.cls_token""") ) return token def snake_case_ ( ): __lowercase : Any = [] head.append(("""layernorm.weight""", """norm.weight""") ) head.append(("""layernorm.bias""", """norm.bias""") ) head.append(("""classifier.weight""", """head.weight""") ) head.append(("""classifier.bias""", """head.bias""") ) return head def snake_case_ ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any ): __lowercase : Optional[Any] = """imagenet-1k-id2label.json""" __lowercase : str = 1000 __lowercase : List[str] = """huggingface/label-files""" __lowercase : List[str] = num_labels __lowercase : Optional[int] = json.load(open(cached_download(hf_hub_url(lowerCAmelCase_ , lowerCAmelCase_ , repo_type="""dataset""" ) ) , """r""" ) ) __lowercase : str = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()} __lowercase : str = idalabel __lowercase : Dict = {v: k for k, v in idalabel.items()} __lowercase : List[Any] = CvtConfig(num_labels=lowerCAmelCase_ , idalabel=lowerCAmelCase_ , labelaid=lowerCAmelCase_ ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "13": __lowercase : Optional[int] = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "21": __lowercase : str = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: __lowercase : Tuple = [2, 2, 20] __lowercase : str = [3, 12, 16] __lowercase : str = [192, 768, 1024] __lowercase : str = CvtForImageClassification(lowerCAmelCase_ ) __lowercase : Any = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" ) __lowercase : Tuple = image_size __lowercase : Optional[Any] = torch.load(lowerCAmelCase_ , map_location=torch.device("""cpu""" ) ) __lowercase : List[str] = OrderedDict() __lowercase : Optional[Any] = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: __lowercase : str = list_of_state_dict + cls_token(lowerCAmelCase_ ) __lowercase : List[Any] = list_of_state_dict + embeddings(lowerCAmelCase_ ) for cnt in range(config.depth[idx] ): __lowercase : int = list_of_state_dict + attention(lowerCAmelCase_ , lowerCAmelCase_ ) __lowercase : Union[str, Any] = list_of_state_dict + final() for gg in list_of_state_dict: print(lowerCAmelCase_ ) for i in range(len(lowerCAmelCase_ ) ): __lowercase : Optional[int] = original_weights[list_of_state_dict[i][1]] model.load_state_dict(lowerCAmelCase_ ) model.save_pretrained(lowerCAmelCase_ ) image_processor.save_pretrained(lowerCAmelCase_ ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": lowerCamelCase : List[str] = argparse.ArgumentParser() parser.add_argument( '''--cvt_model''', default='''cvt-w24''', type=str, help='''Name of the cvt model you\'d like to convert.''', ) parser.add_argument( '''--image_size''', default=3_84, type=int, help='''Input Image Size''', ) parser.add_argument( '''--cvt_file_name''', 
default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''', type=str, help='''Input Image Size''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) lowerCamelCase : int = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
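All of the helper functions above simply accumulate `(hf_key, original_key)` pairs; the final loop then copies tensors one pair at a time into the new state dict. A minimal sketch of that copy step with a single hypothetical pair:

# Sketch of the copy step: each (hf_key, orig_key) pair moves one tensor (keys hypothetical).
from collections import OrderedDict

import torch

original_weights = {"stage0.patch_embed.proj.weight": torch.randn(64, 3, 7, 7)}
list_of_state_dict = [
    (
        "cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight",
        "stage0.patch_embed.proj.weight",
    ),
]

new_state_dict = OrderedDict()
for hf_key, orig_key in list_of_state_dict:
    new_state_dict[hf_key] = original_weights[orig_key]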
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    """Knuth-Morris-Pratt search: True if `pattern` occurs in `text`."""
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """failure[k] = length of the longest proper prefix of pattern[: k + 1] that is also a suffix."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
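The boolean `kmp` above adapts easily into an index-returning search; a hedged variant (not part of the original) that reuses `get_failure_array`:

# Variant sketch: return the start index of the first match instead of a bool.
def kmp_find(pattern: str, text: str) -> int:
    failure = get_failure_array(pattern)
    i, j = 0, 0
    while i < len(text):
        if pattern[j] == text[i]:
            if j == len(pattern) - 1:
                return i - j  # i points at the last matched character
            j += 1
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return -1


# e.g. kmp_find("abc1abc12", "alskfjaldsabc1abc1abc12k23adsfabcabc") == 14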
import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class lowerCAmelCase : '''simple docstring''' def __init__( self : List[Any] , __a : Optional[Any] , __a : List[str]=99 , __a : str=13 , __a : Dict=7 , __a : List[str]=9 , __a : Union[str, Any]=True , __a : Any=True , __a : str=False , __a : str=32 , __a : Dict=5 , __a : str=4 , __a : Optional[Any]=37 , __a : List[str]=8 , __a : int=0.1 , __a : str=0.002 , __a : Optional[int]=1 , __a : Dict=0 , __a : Optional[Any]=0 , __a : List[str]=None , __a : Tuple=None , ) -> Optional[Any]: """simple docstring""" __lowercase : List[str] = parent __lowercase : Dict = batch_size __lowercase : str = encoder_seq_length __lowercase : int = decoder_seq_length # For common tests __lowercase : Dict = self.decoder_seq_length __lowercase : str = is_training __lowercase : Union[str, Any] = use_attention_mask __lowercase : Optional[Any] = use_labels __lowercase : List[Any] = vocab_size __lowercase : int = hidden_size __lowercase : Optional[int] = num_hidden_layers __lowercase : Union[str, Any] = num_attention_heads __lowercase : str = d_ff __lowercase : Tuple = relative_attention_num_buckets __lowercase : Optional[int] = dropout_rate __lowercase : str = initializer_factor __lowercase : str = eos_token_id __lowercase : Dict = pad_token_id __lowercase : Union[str, Any] = decoder_start_token_id __lowercase : Optional[Any] = None __lowercase : str = decoder_layers def lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" return TaConfig.from_pretrained("""google/umt5-base""" ) def lowerCAmelCase ( self : Dict , __a : Any , __a : Optional[Any] , __a : Optional[int] , __a : Optional[int]=None , __a : Dict=None , __a : List[Any]=None , __a : int=None , __a : Any=None , ) -> Optional[int]: """simple docstring""" if attention_mask is None: __lowercase : Optional[int] = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: __lowercase : List[Any] = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: __lowercase : str = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__a ) if decoder_head_mask is None: __lowercase : Any = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__a ) if cross_attn_head_mask is None: __lowercase : str = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=__a ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def lowerCAmelCase ( self : List[str] ) -> str: """simple docstring""" __lowercase : str = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) __lowercase : Tuple = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are 
between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input __lowercase : Any = input_ids.clamp(self.pad_token_id + 1 ) __lowercase : str = decoder_input_ids.clamp(self.pad_token_id + 1 ) __lowercase : Union[str, Any] = self.get_config() __lowercase : Optional[int] = config.num_attention_heads __lowercase : List[Any] = self.prepare_inputs_dict(__a , __a , __a ) return config, input_dict def lowerCAmelCase ( self : List[Any] ) -> Dict: """simple docstring""" __lowercase , __lowercase : str = self.prepare_config_and_inputs() return config, inputs_dict def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" return TaConfig( vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def lowerCAmelCase ( self : Union[str, Any] ) -> List[str]: """simple docstring""" return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def lowerCAmelCase ( self : Tuple , __a : List[str] , __a : List[Any] , __a : Any , __a : Any , __a : Tuple , __a : List[str] , ) -> str: """simple docstring""" __lowercase : Dict = UMTaModel(config=__a ) model.to(__a ) model.eval() __lowercase : Any = model( input_ids=__a , decoder_input_ids=__a , attention_mask=__a , decoder_attention_mask=__a , ) __lowercase : Any = model(input_ids=__a , decoder_input_ids=__a ) __lowercase : Optional[int] = result.last_hidden_state __lowercase : Tuple = result.past_key_values __lowercase : str = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(__a ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def lowerCAmelCase ( self : Dict , __a : Tuple , __a : Any , __a : str , __a : List[Any] , __a : str , __a : List[str] , ) -> Dict: """simple docstring""" __lowercase : Union[str, Any] = UMTaModel(config=__a ).get_decoder().to(__a ).eval() # first forward pass __lowercase : List[Any] = model(__a , use_cache=__a ) __lowercase : Dict = model(__a ) __lowercase : List[Any] = model(__a 
, use_cache=__a ) self.parent.assertTrue(len(__a ) == len(__a ) ) self.parent.assertTrue(len(__a ) == len(__a ) + 1 ) __lowercase , __lowercase : Union[str, Any] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __lowercase : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and __lowercase : int = torch.cat([input_ids, next_tokens] , dim=-1 ) __lowercase : Dict = model(__a )["""last_hidden_state"""] __lowercase : List[str] = model(__a , past_key_values=__a )["""last_hidden_state"""] # select random slice __lowercase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item() __lowercase : Optional[Any] = output_from_no_past[:, -1, random_slice_idx].detach() __lowercase : List[str] = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__a , __a , atol=1E-3 ) ) def lowerCAmelCase ( self : Tuple , __a : Dict , __a : Tuple , ) -> List[str]: """simple docstring""" __lowercase : Dict = UMTaModel(config=__a ).to(__a ).half().eval() __lowercase : Tuple = model(**__a )["""last_hidden_state"""] self.parent.assertFalse(torch.isnan(__a ).any().item() ) @require_torch class lowerCAmelCase ( __a , __a , __a , unittest.TestCase ): '''simple docstring''' _A : int = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) _A : Union[str, Any] = (UMTaForConditionalGeneration,) if is_torch_available() else () _A : Tuple = ( { '''conversational''': UMTaForConditionalGeneration, '''feature-extraction''': UMTaModel, '''summarization''': UMTaForConditionalGeneration, '''text2text-generation''': UMTaForConditionalGeneration, '''translation''': UMTaForConditionalGeneration, '''question-answering''': UMTaForQuestionAnswering, } if is_torch_available() else {} ) _A : str = True _A : Tuple = False _A : List[Any] = False _A : List[Any] = True _A : str = True # The small UMT5 model needs higher percentages for CPU/MP tests _A : str = [0.8, 0.9] def lowerCAmelCase ( self : List[str] ) -> int: """simple docstring""" __lowercase : Optional[Any] = UMTaModelTester(self ) @unittest.skip("""Test has a segmentation fault on torch 1.8.0""" ) def lowerCAmelCase ( self : Any ) -> List[str]: """simple docstring""" __lowercase : str = self.model_tester.prepare_config_and_inputs() __lowercase : List[str] = UMTaModel(config_and_inputs[0] ).to(__a ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( __a , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"{tmpdirname}/t5_test.onnx" , export_params=__a , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , ) @unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" ) def lowerCAmelCase ( self : Dict ) -> int: """simple docstring""" __lowercase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*__a ) def lowerCAmelCase ( self : Union[str, Any] ) -> int: """simple docstring""" __lowercase : List[str] = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""] __lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() __lowercase : Any = config_and_inputs[0] __lowercase : Tuple = UMTaForConditionalGeneration(__a ).eval() model.to(__a ) __lowercase : Dict = { """head_mask""": torch.zeros(config.num_layers , config.num_heads , device=__a ), """decoder_head_mask""": torch.zeros(config.num_decoder_layers , 
config.num_heads , device=__a ), """cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=__a ), } for attn_name, (name, mask) in zip(__a , head_masking.items() ): __lowercase : Dict = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": __lowercase : Tuple = torch.ones( config.num_decoder_layers , config.num_heads , device=__a ) __lowercase : int = model.generate( config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=__a , return_dict_in_generate=__a , **__a , ) # We check the state of decoder_attentions and cross_attentions just from the last step __lowercase : Union[str, Any] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" ) def lowerCAmelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" pass @require_torch @require_sentencepiece @require_tokenizers class lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow @unittest.skip( """Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" ) def lowerCAmelCase ( self : Optional[int] ) -> List[Any]: """simple docstring""" __lowercase : int = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=__a ).to(__a ) __lowercase : List[Any] = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=__a , legacy=__a ) __lowercase : List[str] = [ """Bonjour monsieur <extra_id_0> bien <extra_id_1>.""", """No se como puedo <extra_id_0>.""", """This is the reason why we <extra_id_0> them.""", """The <extra_id_0> walks in <extra_id_1>, seats""", """A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""", ] __lowercase : Dict = tokenizer(__a , return_tensors="""pt""" , padding=__a ).input_ids # fmt: off __lowercase : Tuple = torch.tensor( [ [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1], ] ) # fmt: on torch.testing.assert_allclose(__a , __a ) __lowercase : Union[str, Any] = model.generate(input_ids.to(__a ) ) __lowercase : Optional[int] = [ """<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""", """<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""", """<pad><extra_id_0> are not going to be a part of the world. 
We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""", """<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""", """<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""", ] __lowercase : int = tokenizer.batch_decode(__a ) self.assertEqual(__a , __a )
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging lowerCamelCase : Optional[Any] = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCAmelCase ( __a ): '''simple docstring''' _A : List[str] = ['''pixel_values'''] def __init__( self : Any , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : bool = True , __a : Dict[str, int] = None , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : bool = True , **__a : str , ) -> None: """simple docstring""" super().__init__(**__a ) __lowercase : Dict = size if size is not None else {"""shortest_edge""": 224} __lowercase : Union[str, Any] = get_size_dict(__a , default_to_square=__a ) __lowercase : int = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __lowercase : Any = get_size_dict(__a , default_to_square=__a , param_name="""crop_size""" ) __lowercase : Optional[int] = do_resize __lowercase : Union[str, Any] = size __lowercase : List[Any] = resample __lowercase : Any = do_center_crop __lowercase : Dict = crop_size __lowercase : int = do_rescale __lowercase : Tuple = rescale_factor __lowercase : List[Any] = do_normalize __lowercase : Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __lowercase : int = image_std if image_std is not None else OPENAI_CLIP_STD __lowercase : Union[str, Any] = do_convert_rgb def lowerCAmelCase ( self : Union[str, Any] , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[Any] , ) -> np.ndarray: """simple docstring""" __lowercase : Dict = get_size_dict(__a , default_to_square=__a ) if "shortest_edge" not in size: raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" ) __lowercase : str = get_resize_output_image_size(__a , size=size["""shortest_edge"""] , default_to_square=__a ) return resize(__a , size=__a , resample=__a , data_format=__a , **__a ) def lowerCAmelCase ( self : Tuple , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Any , ) -> np.ndarray: """simple docstring""" __lowercase : Tuple = get_size_dict(__a ) if "height" not in size or "width" not in size: raise ValueError(F"The `size` parameter must contain the keys (height, width). 
Got {size.keys()}" ) return center_crop(__a , size=(size["""height"""], size["""width"""]) , data_format=__a , **__a ) def lowerCAmelCase ( self : Tuple , __a : np.ndarray , __a : Union[int, float] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[Any] , ) -> List[str]: """simple docstring""" return rescale(__a , scale=__a , data_format=__a , **__a ) def lowerCAmelCase ( self : Optional[int] , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[str] , ) -> np.ndarray: """simple docstring""" return normalize(__a , mean=__a , std=__a , data_format=__a , **__a ) def lowerCAmelCase ( self : Optional[int] , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : int = None , __a : bool = None , __a : float = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : bool = None , __a : Optional[Union[str, TensorType]] = None , __a : Optional[ChannelDimension] = ChannelDimension.FIRST , **__a : List[Any] , ) -> PIL.Image.Image: """simple docstring""" __lowercase : List[Any] = do_resize if do_resize is not None else self.do_resize __lowercase : Dict = size if size is not None else self.size __lowercase : Tuple = get_size_dict(__a , param_name="""size""" , default_to_square=__a ) __lowercase : int = resample if resample is not None else self.resample __lowercase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop __lowercase : List[Any] = crop_size if crop_size is not None else self.crop_size __lowercase : List[str] = get_size_dict(__a , param_name="""crop_size""" , default_to_square=__a ) __lowercase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __lowercase : str = rescale_factor if rescale_factor is not None else self.rescale_factor __lowercase : Dict = do_normalize if do_normalize is not None else self.do_normalize __lowercase : Tuple = image_mean if image_mean is not None else self.image_mean __lowercase : str = image_std if image_std is not None else self.image_std __lowercase : str = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __lowercase : Union[str, Any] = make_list_of_images(__a ) if not valid_images(__a ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __lowercase : Union[str, Any] = [convert_to_rgb(__a ) for image in images] # All transformations expect numpy arrays. 
__lowercase : Any = [to_numpy_array(__a ) for image in images] if do_resize: __lowercase : str = [self.resize(image=__a , size=__a , resample=__a ) for image in images] if do_center_crop: __lowercase : str = [self.center_crop(image=__a , size=__a ) for image in images] if do_rescale: __lowercase : Dict = [self.rescale(image=__a , scale=__a ) for image in images] if do_normalize: __lowercase : Optional[Any] = [self.normalize(image=__a , mean=__a , std=__a ) for image in images] __lowercase : Any = [to_channel_dimension_format(__a , __a ) for image in images] __lowercase : Optional[int] = {"""pixel_values""": images} return BatchFeature(data=__a , tensor_type=__a )
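The class above mirrors transformers' CLIP image processor. Assuming it is exported under that name, typical use looks like the following minimal sketch on a dummy image (shapes shown are the defaults: resize to shortest edge 224, then center-crop to 224x224):

# Usage sketch; `CLIPImageProcessor` is the assumed exported name of the class above.
import numpy as np

from transformers import CLIPImageProcessor

image = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)  # dummy HWC uint8 image

processor = CLIPImageProcessor()
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224), channels-first by default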
# limitations under the License.

# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipeline_utils import DiffusionPipeline` works

from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate


deprecate(
    "pipelines_utils",
    "0.22.0",
    "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
    standard_warn=False,
    stacklevel=3,
)
import argparse
import os

import torch

from transformers import FlavaImageCodebook, FlavaImageCodebookConfig


def rreplace(s, old, new, occurrence):
    # replace only the last `occurrence` occurrences of `old`, splitting from the right
    parts = s.rsplit(old, occurrence)
    return new.join(parts)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()

    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    # the converted model must carry the same summed parameter values as the original encoder
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
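`rreplace` is the one non-obvious helper above: because it splits from the right, it rewrites only the final `occurrence` matches, which is what the `.w`/`.b` suffix renames need. Two quick checks on illustrative keys:

# rreplace replaces only the last `occurrence` matches, scanning from the right.
assert rreplace("blocks.0.w.w", ".w", ".weight", 1) == "blocks.0.w.weight"
assert rreplace("blocks.0.b", ".b", ".bias", 1) == "blocks.0.bias"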
import json
import sys


def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
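For a sense of the output shape, here is a minimal, entirely hypothetical results file and the markdown rows the script would emit for it:

# Hypothetical input and output (values made up for illustration).
#
# results.json:
#   {"benchmarks/bench_array.json": {"load_time": {"new": 1.5, "old": 2.0, "diff": -0.5}}}
#
# emitted markdown:
#   ### Benchmark: bench_array.json
#   | metric | load_time |
#   |--------|---|
#   | new / old (diff) | 1.500000 / 2.000000 (-0.500000) |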
import argparse

from torch import nn

# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
    ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
    XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)

from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging


logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Copy the weights of the old checkpoint structure into the new transformers model."""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                # these two checks were bare comparisons (no-ops) in the original; they are meant as assertions
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
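The only structural surgery in the loop above is splitting the fused attention projection, which stacks the query, key and value weights along dim 0; slicing it into thirds recovers each projection. A minimal sketch with a hypothetical hidden size:

# Sketch: a fused in_proj_weight stacks q/k/v along dim 0, so thirds recover them.
import torch

hidden = 4
in_proj_weight = torch.randn(3 * hidden, hidden)
embed_dim = in_proj_weight.shape[0] // 3

q_w = in_proj_weight[:embed_dim, :]
k_w = in_proj_weight[embed_dim : 2 * embed_dim, :]
v_w = in_proj_weight[2 * embed_dim :, :]
assert q_w.shape == k_w.shape == v_w.shape == (hidden, hidden)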
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu

# Disable TF32 matmuls so the reference slices below stay reproducible.
torch.backends.cuda.matmul.allow_tf32 = False


class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        model = Transformer2DModel(**model_kwargs)
        return model

    def test_vq_diffusion(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
306
def solution(pence: int = 200) -> int:
    """Count the ways `pence` can be made from standard British coins (Project Euler 31)."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
306
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
306
import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class lowerCAmelCase : '''simple docstring''' def __init__( self : Optional[Any] , __a : Dict , __a : List[str]=None , __a : Optional[Any]=None , __a : Union[str, Any]=None , __a : int="resnet50" , __a : List[str]=3 , __a : Tuple=32 , __a : Dict=3 , __a : List[str]=True , __a : Union[str, Any]=True , ) -> Any: """simple docstring""" __lowercase : Optional[int] = parent __lowercase : List[str] = out_indices if out_indices is not None else [4] __lowercase : Optional[int] = stage_names __lowercase : Any = out_features __lowercase : Optional[Any] = backbone __lowercase : Optional[Any] = batch_size __lowercase : Union[str, Any] = image_size __lowercase : List[str] = num_channels __lowercase : str = use_pretrained_backbone __lowercase : str = is_training def lowerCAmelCase ( self : Dict ) -> Tuple: """simple docstring""" __lowercase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase : str = self.get_config() return config, pixel_values def lowerCAmelCase ( self : int ) -> str: """simple docstring""" return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def lowerCAmelCase ( self : Optional[int] , __a : Dict , __a : Any ) -> Dict: """simple docstring""" __lowercase : Dict = TimmBackbone(config=__a ) model.to(__a ) model.eval() with torch.no_grad(): __lowercase : Optional[Any] = model(__a ) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , ) def lowerCAmelCase ( self : Any ) -> int: """simple docstring""" __lowercase : Union[str, Any] = self.prepare_config_and_inputs() __lowercase , __lowercase : str = config_and_inputs __lowercase : List[str] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch @require_timm class lowerCAmelCase ( __a , __a , __a , unittest.TestCase ): '''simple docstring''' _A : List[Any] = (TimmBackbone,) if is_torch_available() else () _A : Dict = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {} _A : List[Any] = False _A : List[str] = False _A : Any = False _A : Optional[Any] = False def lowerCAmelCase ( self : Dict ) -> Optional[Any]: """simple docstring""" __lowercase : str = TimmBackboneModelTester(self ) __lowercase : Any = ConfigTester(self , config_class=__a , has_text_modality=__a ) def lowerCAmelCase ( self : Any ) -> str: """simple docstring""" self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() 
self.config_tester.check_config_arguments_init() def lowerCAmelCase ( self : str ) -> Tuple: """simple docstring""" __lowercase : Tuple = """resnet18""" __lowercase : Optional[int] = """microsoft/resnet-18""" __lowercase : Union[str, Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a ) __lowercase : Dict = AutoBackbone.from_pretrained(__a ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,) ) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] ) __lowercase : Union[str, Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a , out_indices=[1, 2, 3] ) __lowercase : Optional[Any] = AutoBackbone.from_pretrained(__a , out_indices=[1, 2, 3] ) self.assertEqual(timm_model.out_indices , transformers_model.out_indices ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) @unittest.skip("""TimmBackbone doesn't support feed forward chunking""" ) def lowerCAmelCase ( self : List[Any] ) -> Any: """simple docstring""" pass @unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" ) def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" pass @unittest.skip("""TimmBackbone initialization is managed on the timm side""" ) def lowerCAmelCase ( self : List[Any] ) -> str: """simple docstring""" pass @unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" ) def lowerCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" pass @unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" ) def lowerCAmelCase ( self : Tuple ) -> Tuple: """simple docstring""" pass @unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" ) def lowerCAmelCase ( self : Dict ) -> Optional[Any]: """simple docstring""" pass @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" ) def lowerCAmelCase ( self : Union[str, Any] ) -> int: """simple docstring""" pass @unittest.skip("""model weights aren't tied in TimmBackbone.""" ) def lowerCAmelCase ( self : Union[str, Any] ) -> str: """simple docstring""" pass @unittest.skip("""model weights aren't tied in TimmBackbone.""" ) def lowerCAmelCase ( self : Dict ) -> int: """simple docstring""" pass @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" ) def lowerCAmelCase ( self : List[str] ) -> List[Any]: """simple docstring""" pass @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" ) def lowerCAmelCase ( self : List[Any] ) -> Tuple: """simple docstring""" pass @unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" ) def lowerCAmelCase ( self : Dict ) -> Any: """simple docstring""" pass @unittest.skip("""TimmBackbone doesn't support output_attentions.""" ) def lowerCAmelCase ( self : str ) -> List[Any]: """simple docstring""" pass @unittest.skip("""Safetensors is not supported by timm.""" ) def lowerCAmelCase ( self : Any ) -> List[Any]: 
"""simple docstring""" pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCAmelCase ( self : List[str] ) -> List[str]: """simple docstring""" pass def lowerCAmelCase ( self : Any ) -> List[str]: """simple docstring""" __lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase : Optional[Any] = model_class(__a ) __lowercase : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase : List[str] = [*signature.parameters.keys()] __lowercase : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __a ) def lowerCAmelCase ( self : Optional[Any] ) -> int: """simple docstring""" __lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common() __lowercase : Optional[Any] = True __lowercase : Union[str, Any] = self.has_attentions # no need to test all models as different heads yield the same functionality __lowercase : Union[str, Any] = self.all_model_classes[0] __lowercase : List[Any] = model_class(__a ) model.to(__a ) __lowercase : Optional[Any] = self._prepare_for_class(__a , __a ) __lowercase : Union[str, Any] = model(**__a ) __lowercase : Optional[int] = outputs[0][-1] # Encoder-/Decoder-only models __lowercase : Any = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: __lowercase : Optional[int] = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=__a ) self.assertIsNotNone(hidden_states.grad ) if self.has_attentions: self.assertIsNotNone(attentions.grad ) def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" __lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase : List[str] = model_class(__a ) model.to(__a ) model.eval() __lowercase : int = model(**__a ) self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) ) self.assertEqual(len(model.channels ) , len(config.out_indices ) ) # Check output of last stage is taken if out_features=None, out_indices=None __lowercase : Any = copy.deepcopy(__a ) __lowercase : Dict = None __lowercase : Tuple = model_class(__a ) model.to(__a ) model.eval() __lowercase : Optional[int] = model(**__a ) self.assertEqual(len(result.feature_maps ) , 1 ) self.assertEqual(len(model.channels ) , 1 ) # Check backbone can be initialized with fresh weights __lowercase : List[str] = copy.deepcopy(__a ) __lowercase : Optional[Any] = False __lowercase : str = model_class(__a ) model.to(__a ) model.eval() __lowercase : List[Any] = model(**__a )
306
1
from __future__ import annotations

from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any


@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[Any]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[Any]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[Any]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """Alternate between left-to-right and right-to-left order, level by level."""
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []

    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")

    print(f"Height of Tree: {height(root)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")

    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
306
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
    "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
    "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
    "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
    "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
    "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
    "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
    "self_attn.rotary_emb": "encoder.embed_positions",
    "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
    "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
    "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
    "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
    "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
    "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
    "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
    "ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
    "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
    "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
    "ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
    "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk down the attribute path (e.g. "encoder.layers.0.ffn1") to the target module.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak model's weights to transformers design."""
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
306
1
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) lowerCamelCase : List[str] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCamelCase : Tuple = { '''vocab_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCamelCase : Any = { '''vocab_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCamelCase : Any = { '''vocab_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCamelCase : Optional[Any] = { '''facebook/dpr-ctx_encoder-single-nq-base''': 5_12, '''facebook/dpr-ctx_encoder-multiset-base''': 5_12, } lowerCamelCase : Dict = { '''facebook/dpr-question_encoder-single-nq-base''': 5_12, '''facebook/dpr-question_encoder-multiset-base''': 5_12, } lowerCamelCase : Union[str, Any] = { '''facebook/dpr-reader-single-nq-base''': 5_12, '''facebook/dpr-reader-multiset-base''': 5_12, } lowerCamelCase : Optional[int] = { '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCamelCase : Dict = { '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCamelCase : List[str] = { '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True}, } class lowerCAmelCase ( __a ): '''simple docstring''' _A : Dict = VOCAB_FILES_NAMES _A : Dict = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP _A : Any = 
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A : Union[str, Any] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class lowerCAmelCase ( __a ): '''simple docstring''' _A : Optional[Any] = VOCAB_FILES_NAMES _A : Optional[int] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP _A : Optional[int] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A : int = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION lowerCamelCase : Optional[Any] = collections.namedtuple( '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text'''] ) lowerCamelCase : int = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits''']) lowerCamelCase : Tuple = r''' Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: ``` [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> ``` Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. 
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `\'tf\'`: Return TensorFlow `tf.constant` objects. - `\'pt\'`: Return PyTorch `torch.Tensor` objects. - `\'np\'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer\'s default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Returns: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. ''' @add_start_docstrings(__a ) class lowerCAmelCase : '''simple docstring''' def __call__( self : Dict , __a : Dict , __a : Optional[str] = None , __a : Optional[str] = None , __a : Union[bool, str] = False , __a : Union[bool, str] = False , __a : Optional[int] = None , __a : Optional[Union[str, TensorType]] = None , __a : Optional[bool] = None , **__a : List[Any] , ) -> BatchEncoding: """simple docstring""" if titles is None and texts is None: return super().__call__( __a , padding=__a , truncation=__a , max_length=__a , return_tensors=__a , return_attention_mask=__a , **__a , ) elif titles is None or texts is None: __lowercase : str = titles if texts is None else texts return super().__call__( __a , __a , padding=__a , truncation=__a , max_length=__a , return_tensors=__a , return_attention_mask=__a , **__a , ) __lowercase : Optional[int] = titles if not isinstance(__a , __a ) else [titles] __lowercase : Any = texts if not isinstance(__a , __a ) else [texts] __lowercase : int = len(__a ) __lowercase : Optional[int] = questions if not isinstance(__a , __a ) else [questions] * n_passages if len(__a ) != len(__a ): raise ValueError( F"There should be as many titles than texts but got {len(__a )} titles and {len(__a )} texts." 
) __lowercase : List[str] = super().__call__(__a , __a , padding=__a , truncation=__a )["""input_ids"""] __lowercase : str = super().__call__(__a , add_special_tokens=__a , padding=__a , truncation=__a )["""input_ids"""] __lowercase : str = { """input_ids""": [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(__a , __a ) ] } if return_attention_mask is not False: __lowercase : int = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) __lowercase : Optional[int] = attention_mask return self.pad(__a , padding=__a , max_length=__a , return_tensors=__a ) def lowerCAmelCase ( self : Optional[Any] , __a : BatchEncoding , __a : DPRReaderOutput , __a : int = 16 , __a : int = 64 , __a : int = 4 , ) -> List[DPRSpanPrediction]: """simple docstring""" __lowercase : Any = reader_input["""input_ids"""] __lowercase , __lowercase , __lowercase : Union[str, Any] = reader_output[:3] __lowercase : str = len(__a ) __lowercase : str = sorted(range(__a ) , reverse=__a , key=relevance_logits.__getitem__ ) __lowercase : List[DPRReaderOutput] = [] for doc_id in sorted_docs: __lowercase : str = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence __lowercase : Tuple = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: __lowercase : Any = sequence_ids.index(self.pad_token_id ) else: __lowercase : Optional[Any] = len(__a ) __lowercase : Optional[Any] = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__a , top_spans=__a , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__a , start_index=__a , end_index=__a , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(__a ) >= num_spans: break return nbest_spans_predictions[:num_spans] def lowerCAmelCase ( self : List[Any] , __a : List[int] , __a : List[int] , __a : int , __a : int , ) -> List[DPRSpanPrediction]: """simple docstring""" __lowercase : Union[str, Any] = [] for start_index, start_score in enumerate(__a ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) __lowercase : str = sorted(__a , key=lambda __a : x[1] , reverse=__a ) __lowercase : Union[str, Any] = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(F"Wrong span indices: [{start_index}:{end_index}]" ) __lowercase : List[Any] = end_index - start_index + 1 if length > max_answer_length: raise ValueError(F"Span is too long: {length} > {max_answer_length}" ) if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(__a ) == top_spans: break return chosen_span_intervals @add_end_docstrings(__a ) class lowerCAmelCase ( __a , 
__a ): '''simple docstring''' _A : Optional[Any] = VOCAB_FILES_NAMES _A : Optional[Any] = READER_PRETRAINED_VOCAB_FILES_MAP _A : List[str] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A : Optional[Any] = READER_PRETRAINED_INIT_CONFIGURATION _A : Optional[int] = ['''input_ids''', '''attention_mask''']
306
def hamming_distance(string1: str, string2: str) -> int:
    """Return the number of positions at which two equal-length strings differ."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
306
1
from __future__ import annotations

from collections.abc import Callable

Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve matrix · x = vector by Gaussian elimination with partial pivoting."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Return the lowest-degree polynomial passing through (1, y_1), ..., (n, y_n)."""
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating function u(n) = 1 - n + n^2 - ... + n^10 from the problem statement."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms of the optimum polynomials (Project Euler 101)."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
306
import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def snake_case_ ( lowerCAmelCase_ : Tuple ): if isinstance(lowerCAmelCase_ , collections.abc.Iterable ): return x return (x, x) @require_flax class lowerCAmelCase : '''simple docstring''' def lowerCAmelCase ( self : Any , __a : Any , __a : List[Any] ) -> Optional[Any]: """simple docstring""" pass def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" pass def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" pass def lowerCAmelCase ( self : Tuple , __a : np.ndarray , __a : np.ndarray , __a : float ) -> List[Any]: """simple docstring""" __lowercase : List[str] = np.abs((a - b) ).max() self.assertLessEqual(__a , __a , F"Difference between torch and flax is {diff} (>= {tol})." ) def lowerCAmelCase ( self : Tuple , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[Any] , __a : Optional[Any]=None , **__a : Tuple ) -> Optional[Any]: """simple docstring""" __lowercase : str = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a ) __lowercase : str = FlaxVisionTextDualEncoderModel(__a ) __lowercase : Optional[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) ) def lowerCAmelCase ( self : Optional[int] , __a : Optional[int] , __a : Dict , __a : Dict , __a : List[str] , __a : Optional[Any]=None , **__a : str ) -> str: """simple docstring""" __lowercase , __lowercase : List[str] = self.get_vision_text_model(__a , __a ) __lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model} __lowercase : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a ) __lowercase : Any = model(input_ids=__a , pixel_values=__a , attention_mask=__a ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] , __a : Union[str, Any] , __a : Union[str, Any] , __a : Dict , __a : int=None , **__a : int ) -> List[Any]: """simple docstring""" __lowercase , __lowercase : Tuple = self.get_vision_text_model(__a , __a ) __lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model} __lowercase : List[str] = 
FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a ) __lowercase : List[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a ) __lowercase : int = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__a ) __lowercase : int = FlaxVisionTextDualEncoderModel.from_pretrained(__a ) __lowercase : Tuple = model(input_ids=__a , pixel_values=__a , attention_mask=__a ) __lowercase : int = after_output[0] __lowercase : int = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__a , 1E-3 ) def lowerCAmelCase ( self : List[Any] , __a : Any , __a : Tuple , __a : Optional[int] , __a : str , __a : Optional[Any]=None , **__a : Optional[Any] ) -> List[Any]: """simple docstring""" __lowercase , __lowercase : str = self.get_vision_text_model(__a , __a ) __lowercase : Optional[Any] = {"""vision_model""": vision_model, """text_model""": text_model} __lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a ) __lowercase : Union[str, Any] = model( input_ids=__a , pixel_values=__a , attention_mask=__a , output_attentions=__a ) __lowercase : Optional[int] = output.vision_model_output.attentions self.assertEqual(len(__a ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) __lowercase : Optional[int] = to_atuple(vision_model.config.image_size ) __lowercase : List[str] = to_atuple(vision_model.config.patch_size ) __lowercase : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __lowercase : int = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __lowercase : Dict = output.text_model_output.attentions self.assertEqual(len(__a ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def lowerCAmelCase ( self : Optional[int] , __a : List[str] , __a : List[Any] , __a : Optional[Any] ) -> Optional[int]: """simple docstring""" pt_model.to(__a ) pt_model.eval() # prepare inputs __lowercase : Union[str, Any] = inputs_dict __lowercase : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): __lowercase : Union[str, Any] = pt_model(**__a ).to_tuple() __lowercase : Tuple = fx_model(**__a ).to_tuple() self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(__a ) __lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained(__a , from_pt=__a ) __lowercase : Dict = fx_model_loaded(**__a ).to_tuple() self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(__a ) __lowercase : str = VisionTextDualEncoderModel.from_pretrained(__a , from_flax=__a ) pt_model_loaded.to(__a ) pt_model_loaded.eval() with torch.no_grad(): __lowercase : List[Any] = pt_model_loaded(**__a ).to_tuple() self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output_loaded in 
zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(__a , pt_output_loaded.numpy() , 4E-2 ) def lowerCAmelCase ( self : Optional[int] , __a : List[Any] , __a : int , __a : Optional[int] ) -> Optional[int]: """simple docstring""" __lowercase : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a ) __lowercase : str = VisionTextDualEncoderModel(__a ) __lowercase : Union[str, Any] = FlaxVisionTextDualEncoderModel(__a ) __lowercase : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __a ) __lowercase : Any = fx_state self.check_pt_flax_equivalence(__a , __a , __a ) def lowerCAmelCase ( self : Any , __a : Any , __a : Dict , __a : Tuple ) -> str: """simple docstring""" __lowercase : int = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a ) __lowercase : Union[str, Any] = VisionTextDualEncoderModel(__a ) __lowercase : Dict = FlaxVisionTextDualEncoderModel(__a ) __lowercase : Tuple = load_flax_weights_in_pytorch_model(__a , fx_model.params ) self.check_pt_flax_equivalence(__a , __a , __a ) def lowerCAmelCase ( self : str ) -> Optional[Any]: """simple docstring""" __lowercase : Optional[Any] = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**__a ) def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" __lowercase : int = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**__a ) def lowerCAmelCase ( self : List[Any] ) -> Dict: """simple docstring""" __lowercase : List[str] = self.prepare_config_and_inputs() self.check_save_load(**__a ) def lowerCAmelCase ( self : Any ) -> Dict: """simple docstring""" __lowercase : str = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**__a ) @is_pt_flax_cross_test def lowerCAmelCase ( self : List[str] ) -> Tuple: """simple docstring""" __lowercase : Optional[Any] = self.prepare_config_and_inputs() __lowercase : Optional[int] = config_inputs_dict.pop("""vision_config""" ) __lowercase : Optional[int] = config_inputs_dict.pop("""text_config""" ) __lowercase : Dict = config_inputs_dict self.check_equivalence_pt_to_flax(__a , __a , __a ) self.check_equivalence_flax_to_pt(__a , __a , __a ) @slow def lowerCAmelCase ( self : Union[str, Any] ) -> str: """simple docstring""" __lowercase , __lowercase : List[Any] = self.get_pretrained_model_and_inputs() __lowercase : Dict = model_a(**__a ) __lowercase : Any = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(__a ) __lowercase : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(__a ) __lowercase : Optional[int] = model_a(**__a ) __lowercase : Tuple = after_outputs[0] __lowercase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__a , 1E-5 ) @require_flax class lowerCAmelCase ( __a , unittest.TestCase ): '''simple docstring''' def lowerCAmelCase ( self : Dict ) -> Dict: """simple docstring""" __lowercase : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=__a , text_from_pt=__a , ) __lowercase : int = 13 __lowercase : Union[str, Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) __lowercase : Dict = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) __lowercase : Tuple = random_attention_mask([batch_size, 4] ) __lowercase : str = 
{"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def lowerCAmelCase ( self : Optional[Any] , __a : Union[str, Any] , __a : int ) -> Dict: """simple docstring""" __lowercase : int = FlaxViTModel(__a ) __lowercase : List[Any] = FlaxBertModel(__a ) return vision_model, text_model def lowerCAmelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" __lowercase : Tuple = FlaxViTModelTester(self ) __lowercase : str = FlaxBertModelTester(self ) __lowercase : List[str] = vit_model_tester.prepare_config_and_inputs() __lowercase : Union[str, Any] = bert_model_tester.prepare_config_and_inputs() __lowercase , __lowercase : Optional[int] = vision_config_and_inputs __lowercase , __lowercase , __lowercase , __lowercase : Any = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class lowerCAmelCase ( __a , unittest.TestCase ): '''simple docstring''' def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __lowercase : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=__a , text_from_pt=__a , ) __lowercase : Tuple = 13 __lowercase : Optional[Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) __lowercase : Tuple = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) __lowercase : List[Any] = random_attention_mask([batch_size, 4] ) __lowercase : int = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def lowerCAmelCase ( self : str , __a : str , __a : Union[str, Any] ) -> Any: """simple docstring""" __lowercase : Dict = FlaxCLIPVisionModel(__a ) __lowercase : Optional[Any] = FlaxBertModel(__a ) return vision_model, text_model def lowerCAmelCase ( self : List[Any] ) -> List[str]: """simple docstring""" __lowercase : List[Any] = FlaxCLIPVisionModelTester(self ) __lowercase : Optional[Any] = FlaxBertModelTester(self ) __lowercase : Any = clip_model_tester.prepare_config_and_inputs() __lowercase : Optional[Any] = bert_model_tester.prepare_config_and_inputs() __lowercase , __lowercase : Dict = vision_config_and_inputs __lowercase , __lowercase , __lowercase , __lowercase : Optional[int] = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" __lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 ) __lowercase : int = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" ) __lowercase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) __lowercase : Tuple = processor( text=["""una foto di un gatto""", """una foto di un cane"""] , images=__a , 
padding=__a , return_tensors="""np""" ) __lowercase : Optional[int] = model(**__a ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) __lowercase : Optional[Any] = np.array([[1.2284727, 0.3104122]] ) self.assertTrue(np.allclose(outputs.logits_per_image , __a , atol=1E-3 ) )
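# Usage sketch: a minimal standalone version of the inference flow the
# integration test above exercises. Checkpoint and probe texts are taken from
# that test; the image path and the softmax step are illustrative additions.
import jax
from PIL import Image
from transformers import FlaxVisionTextDualEncoderModel, VisionTextDualEncoderProcessor

model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

image = Image.open("cat.png")  # placeholder path
inputs = processor(
    text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
)
outputs = model(**inputs)
# logits_per_image has shape (num_images, num_texts); softmax over the text
# axis gives per-image match probabilities.
probs = jax.nn.softmax(outputs.logits_per_image, axis=-1)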
import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase ( __a , unittest.TestCase ): '''simple docstring''' _A : Dict = DebertaTokenizer _A : str = True _A : str = DebertaTokenizerFast def lowerCAmelCase ( self : Optional[Any] ) -> Tuple: """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __lowercase : Optional[int] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """[UNK]""", ] __lowercase : List[str] = dict(zip(__a , range(len(__a ) ) ) ) __lowercase : Tuple = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] __lowercase : List[Any] = {"""unk_token""": """[UNK]"""} __lowercase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) __lowercase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(__a ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(__a ) ) def lowerCAmelCase ( self : Optional[Any] , **__a : int ) -> List[str]: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__a ) def lowerCAmelCase ( self : Union[str, Any] , __a : List[Any] ) -> Union[str, Any]: """simple docstring""" __lowercase : str = """lower newer""" __lowercase : Optional[int] = """lower newer""" return input_text, output_text def lowerCAmelCase ( self : int ) -> Optional[int]: """simple docstring""" __lowercase : List[str] = self.get_tokenizer() __lowercase : Optional[Any] = """lower newer""" __lowercase : Optional[int] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] __lowercase : Union[str, Any] = tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) __lowercase : Any = tokens + [tokenizer.unk_token] __lowercase : str = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a ) def lowerCAmelCase ( self : Tuple ) -> Union[str, Any]: """simple docstring""" __lowercase : int = self.get_tokenizer() __lowercase : Optional[Any] = tokenizer("""Hello""" , """World""" ) __lowercase : List[str] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd["""token_type_ids"""] , __a ) @slow def lowerCAmelCase ( self : Any ) -> int: """simple docstring""" __lowercase : Optional[int] = self.tokenizer_class.from_pretrained("""microsoft/deberta-base""" ) __lowercase : List[str] = tokenizer.encode("""sequence builders""" , add_special_tokens=__a ) __lowercase : Dict = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__a ) __lowercase : int = tokenizer.encode( """sequence builders""" , add_special_tokens=__a , add_prefix_space=__a ) __lowercase : Dict = tokenizer.encode( """sequence builders""" , """multi-sequence build""" , add_special_tokens=__a , add_prefix_space=__a ) __lowercase : Dict = tokenizer.build_inputs_with_special_tokens(__a ) __lowercase : str = tokenizer.build_inputs_with_special_tokens(__a , 
__a ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" __lowercase : Union[str, Any] = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: __lowercase : Dict = tokenizer_class.from_pretrained("""microsoft/deberta-base""" ) __lowercase : List[Any] = [ """ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""", """ALBERT incorporates two parameter reduction techniques""", """The first one is a factorized embedding parameterization. By decomposing the large vocabulary""" """ embedding matrix into two small matrices, we separate the size of the hidden layers from the size of""" """ vocabulary embedding.""", ] __lowercase : Dict = tokenizer(__a , padding=__a ) __lowercase : Union[str, Any] = [tokenizer.decode(__a , skip_special_tokens=__a ) for seq in encoding["""input_ids"""]] # fmt: off __lowercase : Any = { """input_ids""": [ [1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2] ], """token_type_ids""": [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], """attention_mask""": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on __lowercase : int = [ """ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""", """ALBERT incorporates two parameter reduction techniques""", """The first one is a factorized embedding parameterization. By decomposing the large vocabulary""" """ embedding matrix into two small matrices, we separate the size of the hidden layers from the size of""" """ vocabulary embedding.""", ] self.assertDictEqual(encoding.data , __a ) for expected, decoded in zip(__a , __a ): self.assertEqual(__a , __a )
from ..utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_scipy_available,
    is_torch_available,
    is_torchsde_available,
)


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_pt_objects import *  # noqa F403
else:
    from .scheduling_consistency_models import CMStochasticIterativeScheduler
    from .scheduling_ddim import DDIMScheduler
    from .scheduling_ddim_inverse import DDIMInverseScheduler
    from .scheduling_ddim_parallel import DDIMParallelScheduler
    from .scheduling_ddpm import DDPMScheduler
    from .scheduling_ddpm_parallel import DDPMParallelScheduler
    from .scheduling_deis_multistep import DEISMultistepScheduler
    from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
    from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
    from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
    from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
    from .scheduling_euler_discrete import EulerDiscreteScheduler
    from .scheduling_heun_discrete import HeunDiscreteScheduler
    from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
    from .scheduling_karras_ve import KarrasVeScheduler
    from .scheduling_pndm import PNDMScheduler
    from .scheduling_repaint import RePaintScheduler
    from .scheduling_sde_ve import ScoreSdeVeScheduler
    from .scheduling_sde_vp import ScoreSdeVpScheduler
    from .scheduling_unclip import UnCLIPScheduler
    from .scheduling_unipc_multistep import UniPCMultistepScheduler
    from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
    from .scheduling_vq_diffusion import VQDiffusionScheduler


try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_flax_objects import *  # noqa F403
else:
    from .scheduling_ddim_flax import FlaxDDIMScheduler
    from .scheduling_ddpm_flax import FlaxDDPMScheduler
    from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
    from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
    from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
    from .scheduling_pndm_flax import FlaxPNDMScheduler
    from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
    from .scheduling_utils_flax import (
        FlaxKarrasDiffusionSchedulers,
        FlaxSchedulerMixin,
        FlaxSchedulerOutput,
        broadcast_to_shape_from_left,
    )


try:
    if not (is_torch_available() and is_scipy_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_scipy_objects import *  # noqa F403
else:
    from .scheduling_lms_discrete import LMSDiscreteScheduler

try:
    if not (is_torch_available() and is_torchsde_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_torchsde_objects import *  # noqa F403
else:
    from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
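# Usage sketch: the schedulers re-exported above share one interface, so they
# can be swapped freely in a sampling loop. Values here are illustrative.
from diffusers import DDIMScheduler, DPMSolverMultistepScheduler

scheduler = DDIMScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(50)  # prepare a 50-step inference schedule
print(scheduler.timesteps[:5])

scheduler = DPMSolverMultistepScheduler(num_train_timesteps=1000)  # drop-in swap
scheduler.set_timesteps(50)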
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factorization of ``n`` in ascending order.

    >>> prime_factors(12)
    [2, 2, 3]
    >>> prime_factors(13)
    [13]
    """
    i = 2
    factors = []
    # Trial division: any remaining factor of n greater than sqrt(n) is prime.
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
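# Worked examples for the helper above (results checked by hand):
assert prime_factors(360) == [2, 2, 2, 3, 3, 5]  # 360 = 2^3 * 3^2 * 5
assert prime_factors(97) == [97]                 # primes factor as themselves
assert prime_factors(1) == []                    # 1 has no prime factors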
import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm lowerCamelCase : str = re.compile('''[^A-Za-z_0-9]''') # parameters used in DuplicationIndex lowerCamelCase : Union[str, Any] = 10 lowerCamelCase : List[str] = 2_56 def snake_case_ ( lowerCAmelCase_ : List[str] ): if len(lowerCAmelCase_ ) < MIN_NUM_TOKENS: return None __lowercase : Dict = MinHash(num_perm=lowerCAmelCase_ ) for token in set(lowerCAmelCase_ ): min_hash.update(token.encode() ) return min_hash def snake_case_ ( lowerCAmelCase_ : str ): return {t for t in NON_ALPHA.split(lowerCAmelCase_ ) if len(t.strip() ) > 0} class lowerCAmelCase : '''simple docstring''' def __init__( self : List[str] , *, __a : float = 0.85 , ) -> Union[str, Any]: """simple docstring""" __lowercase : Optional[Any] = duplication_jaccard_threshold __lowercase : Optional[Any] = NUM_PERM __lowercase : List[Any] = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm ) __lowercase : List[str] = defaultdict(__a ) def lowerCAmelCase ( self : str , __a : Tuple , __a : MinHash ) -> None: """simple docstring""" __lowercase : List[Any] = self._index.query(__a ) if code_key in self._index.keys: print(F"Duplicate key {code_key}" ) return self._index.insert(__a , __a ) if len(__a ) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(__a ) break else: self._duplicate_clusters[close_duplicates[0]].add(__a ) def lowerCAmelCase ( self : Union[str, Any] ) -> List[List[Dict]]: """simple docstring""" __lowercase : Dict = [] for base, duplicates in self._duplicate_clusters.items(): __lowercase : List[str] = [base] + list(__a ) # reformat the cluster to be a list of dict __lowercase : Optional[Any] = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster] duplicate_clusters.append(__a ) return duplicate_clusters def lowerCAmelCase ( self : Any , __a : int ) -> None: """simple docstring""" __lowercase : Tuple = self.get_duplicate_clusters() with open(__a , """w""" ) as f: json.dump(__a , __a ) def snake_case_ ( lowerCAmelCase_ : str ): __lowercase , __lowercase : Union[str, Any] = element __lowercase : Optional[Any] = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] ) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def snake_case_ ( lowerCAmelCase_ : Type[Dataset] ): with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash , ThreadedIterator(lowerCAmelCase_ , max_queue_size=10000 ) , chunksize=100 , ): if data is not None: yield data def snake_case_ ( lowerCAmelCase_ : Type[Dataset] , lowerCAmelCase_ : float ): __lowercase : Dict = DuplicationIndex(duplication_jaccard_threshold=lowerCAmelCase_ ) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(lowerCAmelCase_ ) ) , max_queue_size=100 ) ): di.add(lowerCAmelCase_ , lowerCAmelCase_ ) # Returns a List[Cluster] where Cluster is List[str] with the filenames. 
return di.get_duplicate_clusters() def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : str ): __lowercase : List[str] = get_tokens(lowerCAmelCase_ ) __lowercase : Dict = get_tokens(lowerCAmelCase_ ) return len(tokensa & tokensa ) / len(tokensa | tokensa ) lowerCamelCase : List[str] = None def snake_case_ ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[Any] ): __lowercase : Union[str, Any] = [] for elementa in cluster: __lowercase : Tuple = _shared_dataset[elementa["""base_index"""]]["""content"""] for elementa in extremes: __lowercase : Dict = _shared_dataset[elementa["""base_index"""]]["""content"""] if jaccard_similarity(lowerCAmelCase_ , lowerCAmelCase_ ) >= jaccard_threshold: elementa["copies"] += 1 break else: __lowercase : Dict = 1 extremes.append(lowerCAmelCase_ ) return extremes def snake_case_ ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple ): global _shared_dataset __lowercase : Tuple = dataset __lowercase : Optional[int] = [] __lowercase : str = partial(_find_cluster_extremes_shared , jaccard_threshold=lowerCAmelCase_ ) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( lowerCAmelCase_ , lowerCAmelCase_ , ) , total=len(lowerCAmelCase_ ) , ): extremes_list.append(lowerCAmelCase_ ) return extremes_list def snake_case_ ( lowerCAmelCase_ : Type[Dataset] , lowerCAmelCase_ : float = 0.85 ): __lowercase : Optional[int] = make_duplicate_clusters(lowerCAmelCase_ , lowerCAmelCase_ ) __lowercase : Tuple = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster} __lowercase : int = {} __lowercase : Dict = find_extremes(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for extremes in extremes_clusters: for element in extremes: __lowercase : Optional[Any] = element __lowercase : int = duplicate_indices - set(extreme_dict.keys() ) __lowercase : int = dataset.filter(lambda lowerCAmelCase_ , lowerCAmelCase_ : idx not in remove_indices , with_indices=lowerCAmelCase_ ) # update duplicate_clusters for cluster in duplicate_clusters: for element in cluster: __lowercase : List[str] = element["""base_index"""] in extreme_dict if element["is_extreme"]: __lowercase : str = extreme_dict[element["""base_index"""]]["""copies"""] print(F"Original dataset size: {len(lowerCAmelCase_ )}" ) print(F"Number of duplicate clusters: {len(lowerCAmelCase_ )}" ) print(F"Files in duplicate cluster: {len(lowerCAmelCase_ )}" ) print(F"Unique files in duplicate cluster: {len(lowerCAmelCase_ )}" ) print(F"Filtered dataset size: {len(lowerCAmelCase_ )}" ) return ds_filter, duplicate_clusters
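# Worked example of the token-level Jaccard similarity used above, assuming
# this module's get_tokens / jaccard_similarity helpers are in scope.
# NON_ALPHA splits on anything outside [A-Za-z_0-9], so "a b c" tokenizes to
# {"a", "b", "c"}; intersection {"b", "c"} over union {"a", "b", "c", "d"}
# gives 2 / 4 = 0.5.
assert jaccard_similarity("a b c", "b c d") == 0.5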
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Transform a snake_case string into camelCase (or PascalCase when
    ``use_pascal`` is True).

    >>> snake_to_camel_case("some_random_string")
    'someRandomString'
    >>> snake_to_camel_case("some_random_string", use_pascal=True)
    'SomeRandomString'
    """
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    # camelCase keeps the first word as-is; PascalCase capitalizes every word.
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
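# Example outputs for the converter above:
assert snake_to_camel_case("hello_world") == "helloWorld"
assert snake_to_camel_case("hello_world", use_pascal=True) == "HelloWorld"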
from ...processing_utils import ProcessorMixin


class TvltProcessor(ProcessorMixin):
    """Wraps a TVLT image processor and a TVLT feature extractor into a single
    processor: videos/images go through the image processor, raw audio through
    the feature extractor, and both outputs are merged into one input dict."""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
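# Usage sketch (illustrative only: the checkpoint name is assumed, not
# verified, and the sampling rate must match what the feature extractor
# expects):
# import numpy as np
# from transformers import TvltProcessor
#
# processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")  # assumed checkpoint
# video = list(np.random.rand(8, 3, 224, 224))  # 8 RGB frames, channels-first
# audio = list(np.random.rand(10_000))          # mono waveform
# inputs = processor(images=video, audio=audio, sampling_rate=44_100, return_tensors="pt")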
import subprocess import sys from transformers import BertConfig, BertModel, BertTokenizer, pipeline from transformers.testing_utils import TestCasePlus, require_torch class lowerCAmelCase ( __a ): '''simple docstring''' @require_torch def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" __lowercase : Optional[int] = """ from transformers import BertConfig, BertModel, BertTokenizer, pipeline """ __lowercase : int = """ mname = \"hf-internal-testing/tiny-random-bert\" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) BertTokenizer.from_pretrained(mname) pipe = pipeline(task=\"fill-mask\", model=mname) print(\"success\") """ __lowercase : List[Any] = """ import socket def offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\") socket.socket = offline_socket """ # Force fetching the files so that we can use the cache __lowercase : Optional[Any] = """hf-internal-testing/tiny-random-bert""" BertConfig.from_pretrained(__a ) BertModel.from_pretrained(__a ) BertTokenizer.from_pretrained(__a ) pipeline(task="""fill-mask""" , model=__a ) # baseline - just load from_pretrained with normal network __lowercase : str = [sys.executable, """-c""", """\n""".join([load, run, mock] )] # should succeed __lowercase : Optional[int] = self.get_env() # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files __lowercase : Dict = """1""" __lowercase : List[str] = subprocess.run(__a , env=__a , check=__a , capture_output=__a ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("""success""" , result.stdout.decode() ) @require_torch def lowerCAmelCase ( self : Optional[Any] ) -> Dict: """simple docstring""" __lowercase : Any = """ from transformers import BertConfig, BertModel, BertTokenizer, pipeline """ __lowercase : Optional[Any] = """ mname = \"hf-internal-testing/tiny-random-bert\" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) BertTokenizer.from_pretrained(mname) pipe = pipeline(task=\"fill-mask\", model=mname) print(\"success\") """ __lowercase : int = """ import socket def offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\") socket.socket = offline_socket """ # Force fetching the files so that we can use the cache __lowercase : List[str] = """hf-internal-testing/tiny-random-bert""" BertConfig.from_pretrained(__a ) BertModel.from_pretrained(__a ) BertTokenizer.from_pretrained(__a ) pipeline(task="""fill-mask""" , model=__a ) # baseline - just load from_pretrained with normal network __lowercase : str = [sys.executable, """-c""", """\n""".join([load, run, mock] )] # should succeed __lowercase : Optional[int] = self.get_env() __lowercase : Tuple = subprocess.run(__a , env=__a , check=__a , capture_output=__a ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("""success""" , result.stdout.decode() ) @require_torch def lowerCAmelCase ( self : Optional[Any] ) -> Any: """simple docstring""" __lowercase : Tuple = """ from transformers import BertConfig, BertModel, BertTokenizer """ __lowercase : int = """ mname = \"hf-internal-testing/tiny-random-bert-sharded\" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) print(\"success\") """ __lowercase : str = """ import socket def offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\") socket.socket = offline_socket """ # baseline - just load from_pretrained with normal network __lowercase : str = [sys.executable, """-c""", 
"""\n""".join([load, run] )] # should succeed __lowercase : List[str] = self.get_env() __lowercase : Any = subprocess.run(__a , env=__a , check=__a , capture_output=__a ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("""success""" , result.stdout.decode() ) # next emulate no network __lowercase : List[str] = [sys.executable, """-c""", """\n""".join([load, mock, run] )] # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this. # env["TRANSFORMERS_OFFLINE"] = "0" # result = subprocess.run(cmd, env=env, check=False, capture_output=True) # self.assertEqual(result.returncode, 1, result.stderr) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files __lowercase : Dict = """1""" __lowercase : List[str] = subprocess.run(__a , env=__a , check=__a , capture_output=__a ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("""success""" , result.stdout.decode() ) @require_torch def lowerCAmelCase ( self : Optional[Any] ) -> int: """simple docstring""" __lowercase : str = """ from transformers import pipeline """ __lowercase : List[str] = """ mname = \"hf-internal-testing/tiny-random-bert\" pipe = pipeline(model=mname) """ __lowercase : List[str] = """ import socket def offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\") socket.socket = offline_socket """ __lowercase : str = self.get_env() __lowercase : str = """1""" __lowercase : Tuple = [sys.executable, """-c""", """\n""".join([load, mock, run] )] __lowercase : Optional[int] = subprocess.run(__a , env=__a , check=__a , capture_output=__a ) self.assertEqual(result.returncode , 1 , result.stderr ) self.assertIn( """You cannot infer task automatically within `pipeline` when using offline mode""" , result.stderr.decode().replace("""\n""" , """""" ) , ) @require_torch def lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" __lowercase : List[str] = """ from transformers import AutoModel """ __lowercase : Any = """ mname = \"hf-internal-testing/test_dynamic_model\" AutoModel.from_pretrained(mname, trust_remote_code=True) print(\"success\") """ # baseline - just load from_pretrained with normal network __lowercase : Any = [sys.executable, """-c""", """\n""".join([load, run] )] # should succeed __lowercase : Dict = self.get_env() __lowercase : Dict = subprocess.run(__a , env=__a , check=__a , capture_output=__a ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("""success""" , result.stdout.decode() ) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files __lowercase : List[str] = """1""" __lowercase : Any = subprocess.run(__a , env=__a , check=__a , capture_output=__a ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("""success""" , result.stdout.decode() )
import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class lowerCAmelCase : '''simple docstring''' def __init__( self : Any , __a : Tuple , __a : Optional[int]=13 , __a : int=7 , __a : List[str]=False , __a : Optional[int]=True , __a : Optional[int]=False , __a : Dict=True , __a : Optional[int]=33 , __a : Dict=32 , __a : Optional[int]=5 , __a : Union[str, Any]=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : List[str]=0.1 , __a : Dict=0.1 , __a : List[Any]=512 , __a : Any=16 , __a : Optional[Any]=2 , __a : List[Any]=0.02 , __a : int=3 , __a : Union[str, Any]=4 , __a : Optional[int]=None , ) -> Optional[int]: """simple docstring""" __lowercase : Tuple = parent __lowercase : int = batch_size __lowercase : Any = seq_length __lowercase : str = is_training __lowercase : str = use_input_mask __lowercase : Optional[int] = use_token_type_ids __lowercase : List[Any] = use_labels __lowercase : Optional[Any] = vocab_size __lowercase : int = hidden_size __lowercase : List[Any] = num_hidden_layers __lowercase : Dict = num_attention_heads __lowercase : Any = intermediate_size __lowercase : Dict = hidden_act __lowercase : Union[str, Any] = hidden_dropout_prob __lowercase : List[Any] = attention_probs_dropout_prob __lowercase : List[str] = max_position_embeddings __lowercase : Union[str, Any] = type_vocab_size __lowercase : Dict = type_sequence_label_size __lowercase : Union[str, Any] = initializer_range __lowercase : List[Any] = num_labels __lowercase : str = num_choices __lowercase : Tuple = scope def lowerCAmelCase ( self : Tuple ) -> List[Any]: """simple docstring""" __lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase : int = None if self.use_input_mask: __lowercase : Dict = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase : str = None __lowercase : Optional[Any] = None __lowercase : Tuple = None if self.use_labels: __lowercase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __lowercase : int = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self : Dict ) -> Optional[int]: """simple docstring""" return EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def lowerCAmelCase ( self : List[Any] , __a : int , __a : int , __a : Dict , __a : 
Union[str, Any] , __a : List[str] , __a : str ) -> Union[str, Any]: """simple docstring""" __lowercase : Optional[int] = EsmModel(config=__a ) model.to(__a ) model.eval() __lowercase : str = model(__a , attention_mask=__a ) __lowercase : List[Any] = model(__a ) __lowercase : Optional[int] = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase ( self : Union[str, Any] , __a : Dict , __a : List[Any] , __a : Tuple , __a : Union[str, Any] , __a : str , __a : Union[str, Any] ) -> List[str]: """simple docstring""" __lowercase : List[str] = EsmForMaskedLM(config=__a ) model.to(__a ) model.eval() __lowercase : int = model(__a , attention_mask=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Optional[int] , __a : Union[str, Any] , __a : List[Any] , __a : Tuple , __a : Tuple , __a : Optional[int] , __a : Tuple ) -> Union[str, Any]: """simple docstring""" __lowercase : Tuple = self.num_labels __lowercase : Any = EsmForTokenClassification(config=__a ) model.to(__a ) model.eval() __lowercase : Optional[Any] = model(__a , attention_mask=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" __lowercase : Any = self.prepare_config_and_inputs() ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) : List[str] = config_and_inputs __lowercase : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase ( __a , __a , unittest.TestCase ): '''simple docstring''' _A : Optional[Any] = False _A : Any = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) _A : Optional[Any] = () _A : List[Any] = ( { '''feature-extraction''': EsmModel, '''fill-mask''': EsmForMaskedLM, '''text-classification''': EsmForSequenceClassification, '''token-classification''': EsmForTokenClassification, '''zero-shot''': EsmForSequenceClassification, } if is_torch_available() else {} ) _A : Optional[Any] = True def lowerCAmelCase ( self : Tuple ) -> str: """simple docstring""" __lowercase : Optional[int] = EsmModelTester(self ) __lowercase : Tuple = ConfigTester(self , config_class=__a , hidden_size=37 ) def lowerCAmelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def lowerCAmelCase ( self : int ) -> Optional[Any]: """simple docstring""" __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def lowerCAmelCase ( self : Dict ) -> Optional[Any]: """simple docstring""" __lowercase : Any = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __lowercase : Union[str, Any] = type self.model_tester.create_and_check_model(*__a ) def lowerCAmelCase ( self : int ) -> Any: """simple docstring""" __lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__a ) def lowerCAmelCase ( self : Union[str, Any] ) -> Dict: """simple docstring""" __lowercase : int = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__a ) @slow def lowerCAmelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase : List[str] = EsmModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" __lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0] __lowercase : List[str] = EsmEmbeddings(config=__a ) __lowercase : Union[str, Any] = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) __lowercase : int = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) __lowercase : str = create_position_ids_from_input_ids(__a , model.padding_idx ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__a , __a ) ) ) def lowerCAmelCase ( self : Tuple ) -> Any: """simple docstring""" __lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0] __lowercase : Optional[Any] = EsmEmbeddings(config=__a ) __lowercase : Optional[int] = torch.empty(2 , 4 , 30 ) __lowercase : Tuple = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] __lowercase : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] ) __lowercase : Any = embeddings.create_position_ids_from_inputs_embeds(__a ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__a , __a ) ) ) @unittest.skip("""Esm does not support embedding resizing""" ) def lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" pass @unittest.skip("""Esm does not support embedding resizing""" ) def lowerCAmelCase ( self : List[str] ) -> Optional[int]: """simple docstring""" pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCAmelCase ( self : Optional[int] ) -> Tuple: """simple docstring""" pass @require_torch class lowerCAmelCase ( __a ): '''simple docstring''' @slow def lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" with torch.no_grad(): __lowercase : Tuple = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" ) model.eval() __lowercase : Tuple = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __lowercase : List[str] = model(__a )[0] __lowercase : Union[str, Any] = 33 __lowercase : Union[str, Any] = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape , __a ) __lowercase : List[Any] = torch.tensor( [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) ) @slow def lowerCAmelCase ( self : str ) -> Union[str, Any]: """simple docstring""" with torch.no_grad(): __lowercase : int = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" ) model.eval() __lowercase : int = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) __lowercase : Any = model(__a )[0] # compare the actual values for a slice. __lowercase : int = torch.tensor( [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product over all contiguous subarrays of ``numbers``.

    >>> max_product_subarray([2, 3, -2, 4])
    6
    >>> max_product_subarray([-2, 0, -1])
    0
    """
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # A negative number swaps the roles of the running max and min products.
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # Update the best product found so far.
        max_prod = max(max_prod, max_till_now)
    return max_prod
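# Worked example (traced by hand): the two negatives multiply to a positive,
# so the whole array is the best subarray: (-2) * (-3) * 4 = 24.
assert max_product_subarray([-2, -3, 4]) == 24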
def is_pentagonal(n: int) -> bool:
    """Return True if ``n`` is pentagonal, i.e. n = m * (3 * m - 1) / 2 for a
    positive integer m, which holds iff (1 + sqrt(1 + 24 * n)) / 6 is a whole
    number."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Project Euler problem 44: find pentagonal numbers P_i, P_j whose sum
    and difference are both pentagonal, and return that difference (or -1 if
    no such pair exists below ``limit``)."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
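# Sanity checks for the predicate above: the pentagonal numbers start
# 1, 5, 12, 22, 35, ...
assert is_pentagonal(22)
assert not is_pentagonal(23)
# solution() runs the full quadratic search; with the default limit it
# returns the Problem 44 answer after scanning a few million pairs.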
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
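# Usage sketch: the attribute_map above lets the config answer to the generic
# names used across the library as well as its native GPT-style names
# (assumes the class above is importable).
config = TrajectoryTransformerConfig(n_layer=2, n_head=2, n_embd=64)
assert config.num_hidden_layers == 2  # alias for n_layer
assert config.hidden_size == 64       # alias for n_embd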
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class lowerCAmelCase ( __a ): '''simple docstring''' _A : Optional[Any] = (DPMSolverSDEScheduler,) _A : Dict = 10 def lowerCAmelCase ( self : Optional[int] , **__a : Dict ) -> Optional[int]: """simple docstring""" __lowercase : Any = { """num_train_timesteps""": 1100, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", """noise_sampler_seed""": 0, } config.update(**__a ) return config def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]: """simple docstring""" for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=__a ) def lowerCAmelCase ( self : Any ) -> Optional[int]: """simple docstring""" for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=__a , beta_end=__a ) def lowerCAmelCase ( self : str ) -> Optional[Any]: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=__a ) def lowerCAmelCase ( self : Dict ) -> Tuple: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__a ) def lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" __lowercase : Optional[int] = self.scheduler_classes[0] __lowercase : List[str] = self.get_scheduler_config() __lowercase : Any = scheduler_class(**__a ) scheduler.set_timesteps(self.num_inference_steps ) __lowercase : Optional[Any] = self.dummy_model() __lowercase : str = self.dummy_sample_deter * scheduler.init_noise_sigma __lowercase : Optional[Any] = sample.to(__a ) for i, t in enumerate(scheduler.timesteps ): __lowercase : Union[str, Any] = scheduler.scale_model_input(__a , __a ) __lowercase : Optional[Any] = model(__a , __a ) __lowercase : Optional[Any] = scheduler.step(__a , __a , __a ) __lowercase : str = output.prev_sample __lowercase : Optional[Any] = torch.sum(torch.abs(__a ) ) __lowercase : Union[str, Any] = torch.mean(torch.abs(__a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47821044921875 ) < 1E-2 assert abs(result_mean.item() - 0.2178705964565277 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59352111816406 ) < 1E-2 assert abs(result_mean.item() - 0.22342906892299652 ) < 1E-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3 def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __lowercase : Tuple = self.scheduler_classes[0] __lowercase : Dict = self.get_scheduler_config(prediction_type="""v_prediction""" ) __lowercase : int = scheduler_class(**__a ) scheduler.set_timesteps(self.num_inference_steps ) __lowercase : Optional[int] = self.dummy_model() __lowercase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma __lowercase : Dict = sample.to(__a ) for i, t in enumerate(scheduler.timesteps ): __lowercase : Dict = scheduler.scale_model_input(__a , __a ) __lowercase : Optional[int] = model(__a , __a ) __lowercase : Optional[int] = scheduler.step(__a , __a , __a ) __lowercase : int = output.prev_sample __lowercase : Optional[Any] = torch.sum(torch.abs(__a ) ) __lowercase : List[str] = torch.mean(torch.abs(__a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 124.77149200439453 ) < 1E-2 assert 
abs(result_mean.item() - 0.16226289014816284 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 128.1663360595703 ) < 1E-2 assert abs(result_mean.item() - 0.16688326001167297 ) < 1E-3 else: assert abs(result_sum.item() - 119.8487548828125 ) < 1E-2 assert abs(result_mean.item() - 0.1560530662536621 ) < 1E-3 def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]: """simple docstring""" __lowercase : Tuple = self.scheduler_classes[0] __lowercase : Dict = self.get_scheduler_config() __lowercase : Optional[int] = scheduler_class(**__a ) scheduler.set_timesteps(self.num_inference_steps , device=__a ) __lowercase : int = self.dummy_model() __lowercase : Optional[Any] = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma for t in scheduler.timesteps: __lowercase : int = scheduler.scale_model_input(__a , __a ) __lowercase : List[str] = model(__a , __a ) __lowercase : List[str] = scheduler.step(__a , __a , __a ) __lowercase : int = output.prev_sample __lowercase : List[Any] = torch.sum(torch.abs(__a ) ) __lowercase : Optional[Any] = torch.mean(torch.abs(__a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46957397460938 ) < 1E-2 assert abs(result_mean.item() - 0.21805934607982635 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59353637695312 ) < 1E-2 assert abs(result_mean.item() - 0.22342908382415771 ) < 1E-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3 def lowerCAmelCase ( self : Tuple ) -> Tuple: """simple docstring""" __lowercase : str = self.scheduler_classes[0] __lowercase : List[Any] = self.get_scheduler_config() __lowercase : Tuple = scheduler_class(**__a , use_karras_sigmas=__a ) scheduler.set_timesteps(self.num_inference_steps , device=__a ) __lowercase : List[str] = self.dummy_model() __lowercase : Optional[int] = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma __lowercase : str = sample.to(__a ) for t in scheduler.timesteps: __lowercase : List[Any] = scheduler.scale_model_input(__a , __a ) __lowercase : Optional[Any] = model(__a , __a ) __lowercase : Any = scheduler.step(__a , __a , __a ) __lowercase : Optional[Any] = output.prev_sample __lowercase : Any = torch.sum(torch.abs(__a ) ) __lowercase : Optional[Any] = torch.mean(torch.abs(__a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66974135742188 ) < 1E-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 177.63653564453125 ) < 1E-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2 else: assert abs(result_sum.item() - 170.3135223388672 ) < 1E-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCamelCase : Dict = 16 lowerCamelCase : List[str] = 32 def snake_case_ ( lowerCAmelCase_ : Accelerator , lowerCAmelCase_ : int = 16 ): __lowercase : int = AutoTokenizer.from_pretrained("""bert-base-cased""" ) __lowercase : List[Any] = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(lowerCAmelCase_ : List[str] ): # max_length=None => use the model max length (it's actually the default) __lowercase : List[str] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __lowercase : Tuple = datasets.map( lowerCAmelCase_ , batched=lowerCAmelCase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __lowercase : List[Any] = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(lowerCAmelCase_ : Optional[Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. __lowercase : Tuple = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __lowercase : List[Any] = 16 elif accelerator.mixed_precision != "no": __lowercase : str = 8 else: __lowercase : Union[str, Any] = None return tokenizer.pad( lowerCAmelCase_ , padding="""longest""" , max_length=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_tensors="""pt""" , ) # Instantiate dataloaders. 
__lowercase : Optional[int] = DataLoader( tokenized_datasets["""train"""] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ ) __lowercase : List[Any] = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders lowerCamelCase : int = mocked_dataloaders # noqa: F811 def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : Any ): # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowerCAmelCase_ ) == "1": __lowercase : int = 2 # Initialize accelerator __lowercase : Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __lowercase : Optional[int] = config["""lr"""] __lowercase : Optional[int] = int(config["""num_epochs"""] ) __lowercase : int = int(config["""seed"""] ) __lowercase : Optional[Any] = int(config["""batch_size"""] ) __lowercase : List[str] = evaluate.load("""glue""" , """mrpc""" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=lowerCAmelCase_ ) def inner_training_loop(lowerCAmelCase_ : List[Any] ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(lowerCAmelCase_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __lowercase : Dict = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCAmelCase_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __lowercase : Optional[int] = model.to(accelerator.device ) # Instantiate optimizer __lowercase : Optional[int] = AdamW(params=model.parameters() , lr=lowerCAmelCase_ ) __lowercase , __lowercase : List[str] = get_dataloaders(lowerCAmelCase_ , lowerCAmelCase_ ) # Instantiate scheduler __lowercase : int = get_linear_schedule_with_warmup( optimizer=lowerCAmelCase_ , num_warmup_steps=100 , num_training_steps=(len(lowerCAmelCase_ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __lowercase , __lowercase , __lowercase , __lowercase , __lowercase : List[Any] = accelerator.prepare( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # Now we train the model for epoch in range(lowerCAmelCase_ ): model.train() for step, batch in enumerate(lowerCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) __lowercase : Union[str, Any] = model(**lowerCAmelCase_ ) __lowercase : Union[str, Any] = outputs.loss accelerator.backward(lowerCAmelCase_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __lowercase : Union[str, Any] = model(**lowerCAmelCase_ ) __lowercase : Dict = outputs.logits.argmax(dim=-1 ) __lowercase , __lowercase : int = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=lowerCAmelCase_ , references=lowerCAmelCase_ , ) __lowercase : List[Any] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"epoch {epoch}:" , lowerCAmelCase_ ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def snake_case_ ( ): __lowercase : Optional[Any] = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=lowerCAmelCase_ , default=lowerCAmelCase_ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) __lowercase : str = parser.parse_args() __lowercase : Optional[Any] = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(lowerCAmelCase_ , lowerCAmelCase_ ) if __name__ == "__main__": main()
import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate lowerCamelCase : str = trt.Logger(trt.Logger.WARNING) lowerCamelCase : Any = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) lowerCamelCase : Optional[Any] = logging.getLogger(__name__) lowerCamelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--onnx_model_path''', default=None, type=str, required=True, help='''Path to ONNX model: ''', ) parser.add_argument( '''--output_dir''', default=None, type=str, required=True, help='''The output directory where the model checkpoints and predictions will be written.''', ) # Other parameters parser.add_argument( '''--tokenizer_name''', default='''''', type=str, required=True, help='''Pretrained tokenizer name or path if not the same as model_name''', ) parser.add_argument( '''--version_2_with_negative''', action='''store_true''', help='''If true, the SQuAD examples contain some that do not have an answer.''', ) parser.add_argument( '''--null_score_diff_threshold''', type=float, default=0.0, help='''If null_score - best_non_null is greater than the threshold predict null.''', ) parser.add_argument( '''--max_seq_length''', default=3_84, type=int, help=( '''The maximum total input sequence length after WordPiece tokenization. Sequences ''' '''longer than this will be truncated, and sequences shorter than this will be padded.''' ), ) parser.add_argument( '''--doc_stride''', default=1_28, type=int, help='''When splitting up a long document into chunks, how much stride to take between chunks.''', ) parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''') parser.add_argument( '''--n_best_size''', default=20, type=int, help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''', ) parser.add_argument( '''--max_answer_length''', default=30, type=int, help=( '''The maximum length of an answer that can be generated. 
This is needed because the start ''' '''and end predictions are not conditioned on one another.''' ), ) parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''') parser.add_argument( '''--dataset_name''', type=str, default=None, required=True, help='''The name of the dataset to use (via the datasets library).''', ) parser.add_argument( '''--dataset_config_name''', type=str, default=None, help='''The configuration name of the dataset to use (via the datasets library).''', ) parser.add_argument( '''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.''' ) parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''') parser.add_argument( '''--fp16''', action='''store_true''', help='''Whether to use 16-bit (mixed) precision instead of 32-bit''', ) parser.add_argument( '''--int8''', action='''store_true''', help='''Whether to use INT8''', ) lowerCamelCase : Dict = parser.parse_args() if args.tokenizer_name: lowerCamelCase : str = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( '''You are instantiating a new tokenizer from scratch. This is not supported by this script.''' '''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' ) logger.info('''Training/evaluation parameters %s''', args) lowerCamelCase : List[str] = args.per_device_eval_batch_size lowerCamelCase : Any = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties lowerCamelCase : List[str] = True lowerCamelCase : List[Any] = '''temp_engine/bert-fp32.engine''' if args.fpaa: lowerCamelCase : Optional[Any] = '''temp_engine/bert-fp16.engine''' if args.inta: lowerCamelCase : int = '''temp_engine/bert-int8.engine''' # import ONNX file if not os.path.exists('''temp_engine'''): os.makedirs('''temp_engine''') lowerCamelCase : int = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, '''rb''') as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network lowerCamelCase : Union[str, Any] = [network.get_input(i) for i in range(network.num_inputs)] lowerCamelCase : Dict = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: lowerCamelCase : List[str] = 1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) lowerCamelCase : Optional[int] = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) lowerCamelCase : Optional[Any] = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, '''wb''') as f: f.write(engine.serialize()) def snake_case_ ( lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple ): __lowercase : List[str] = 
np.asarray(inputs["""input_ids"""] , dtype=np.intaa ) __lowercase : Union[str, Any] = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa ) __lowercase : int = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa ) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowerCAmelCase_ ) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowerCAmelCase_ ) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowerCAmelCase_ ) # start time __lowercase : Optional[Any] = time.time() # Run inference context.execute_async( bindings=[int(lowerCAmelCase_ ) for d_inp in d_inputs] + [int(lowerCAmelCase_ ), int(lowerCAmelCase_ )] , stream_handle=stream.handle ) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) cuda.memcpy_dtoh_async(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # Synchronize the stream and take time stream.synchronize() # end time __lowercase : int = time.time() __lowercase : Union[str, Any] = end_time - start_time __lowercase : Any = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. lowerCamelCase : Tuple = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. lowerCamelCase : List[Any] = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError('''Evaluation requires a dataset name''') # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. lowerCamelCase : Optional[Any] = raw_datasets['''validation'''].column_names lowerCamelCase : Union[str, Any] = '''question''' if '''question''' in column_names else column_names[0] lowerCamelCase : str = '''context''' if '''context''' in column_names else column_names[1] lowerCamelCase : Dict = '''answers''' if '''answers''' in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). 
lowerCamelCase : Dict = tokenizer.padding_side == '''right''' if args.max_seq_length > tokenizer.model_max_length: logger.warning( f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the''' f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' ) lowerCamelCase : Tuple = min(args.max_seq_length, tokenizer.model_max_length) def snake_case_ ( lowerCAmelCase_ : int ): # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace __lowercase : str = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. __lowercase : List[str] = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowerCAmelCase_ , stride=args.doc_stride , return_overflowing_tokens=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , padding="""max_length""" , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. __lowercase : List[str] = tokenized_examples.pop("""overflow_to_sample_mapping""" ) # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. __lowercase : Any = [] for i in range(len(tokenized_examples["""input_ids"""] ) ): # Grab the sequence corresponding to that example (to know what is the context and what is the question). __lowercase : Dict = tokenized_examples.sequence_ids(lowerCAmelCase_ ) __lowercase : List[Any] = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. __lowercase : List[str] = sample_mapping[i] tokenized_examples["example_id"].append(examples["""id"""][sample_index] ) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. 
__lowercase : Dict = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] ) ] return tokenized_examples lowerCamelCase : Tuple = raw_datasets['''validation'''] # Validation Feature Creation lowerCamelCase : Optional[int] = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc='''Running tokenizer on validation dataset''', ) lowerCamelCase : Union[str, Any] = default_data_collator lowerCamelCase : Optional[Any] = eval_dataset.remove_columns(['''example_id''', '''offset_mapping''']) lowerCamelCase : List[str] = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def snake_case_ ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict="eval" ): # Post-processing: we match the start logits and end logits to answers in the original context. __lowercase : int = postprocess_qa_predictions( examples=lowerCAmelCase_ , features=lowerCAmelCase_ , predictions=lowerCAmelCase_ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowerCAmelCase_ , ) # Format the result to the format the metric expects. if args.version_2_with_negative: __lowercase : Optional[int] = [ {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items() ] else: __lowercase : List[Any] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()] __lowercase : Optional[int] = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=lowerCAmelCase_ , label_ids=lowerCAmelCase_ ) lowerCamelCase : Dict = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''') # Evaluation! logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path) with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def snake_case_ ( lowerCAmelCase_ : str ): return trt.volume(engine.get_binding_shape(lowerCAmelCase_ ) ) * engine.get_binding_dtype(lowerCAmelCase_ ).itemsize # Allocate device memory for inputs and outputs. lowerCamelCase : int = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer lowerCamelCase : Dict = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) lowerCamelCase : str = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) lowerCamelCase : Dict = cuda.mem_alloc(h_outputa.nbytes) lowerCamelCase : Optional[Any] = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. 
lowerCamelCase : Optional[int] = cuda.Stream() # Evaluation logger.info('''***** Running Evaluation *****''') logger.info(f''' Num examples = {len(eval_dataset)}''') logger.info(f''' Batch size = {args.per_device_eval_batch_size}''') lowerCamelCase : int = 0.0 lowerCamelCase : List[str] = 0 lowerCamelCase : List[str] = timeit.default_timer() lowerCamelCase : List[Any] = None for step, batch in enumerate(eval_dataloader): lowerCamelCase ,lowerCamelCase : str = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 lowerCamelCase ,lowerCamelCase : Union[str, Any] = outputs lowerCamelCase : Optional[Any] = torch.tensor(start_logits) lowerCamelCase : List[str] = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered lowerCamelCase : Optional[int] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_00) lowerCamelCase : Dict = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_00) lowerCamelCase : List[Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) lowerCamelCase : Dict = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_00) if all_preds is not None: lowerCamelCase : Tuple = nested_truncate(all_preds, len(eval_dataset)) lowerCamelCase : Dict = timeit.default_timer() - start_time logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 10_00 / niter)) logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 10_00)) logger.info('''Total Number of Inference = %d''', niter) lowerCamelCase : str = post_processing_function(eval_examples, eval_dataset, all_preds) lowerCamelCase : Optional[Any] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(f'''Evaluation metrics: {eval_metric}''')
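# Sizing note for the device buffers allocated earlier in this script
# (illustrative arithmetic, not original content): `binding_nbytes` multiplies
# a binding's volume by its dtype size, so assuming int32 inputs and
# INPUT_SHAPE == (8, 384) (the defaults above), each input buffer is
# 8 * 384 * 4 = 12288 bytes; the two pagelocked output buffers match the
# engine's start/end logit shapes.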
import qiskit


def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    """Build and simulate a quantum half adder for the two input bits."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit 2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit 3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
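# Sanity check for the circuit above (not part of the original file): with
# inputs (1, 1) the XOR qubit measures 0 and the AND (carry) qubit measures 1,
# so every shot should land on the bitstring "10" (classical bit 1 = AND,
# classical bit 0 = XOR):
#
#   assert half_adder(1, 1) == {"10": 1000}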
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) lowerCamelCase : str = { '''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''', } class lowerCAmelCase ( __a ): '''simple docstring''' _A : int = '''nllb-moe''' _A : List[str] = ['''past_key_values'''] _A : Optional[Any] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : Dict , __a : List[str]=128112 , __a : List[Any]=1024 , __a : List[Any]=12 , __a : Union[str, Any]=4096 , __a : List[str]=16 , __a : int=12 , __a : Optional[int]=4096 , __a : str=16 , __a : List[Any]=0.05 , __a : Any=0.05 , __a : Dict=True , __a : Optional[Any]=True , __a : List[Any]="relu" , __a : Tuple=1024 , __a : Optional[Any]=0.1 , __a : Tuple=0.1 , __a : Any=0.0 , __a : Optional[Any]=0.02 , __a : List[str]=2 , __a : Union[str, Any]=True , __a : List[Any]=False , __a : Tuple="float32" , __a : Optional[int]=False , __a : Optional[int]=128 , __a : str=64 , __a : Dict=4 , __a : str=4 , __a : List[str]=0.001 , __a : List[Any]=0.001 , __a : Optional[Any]="all" , __a : Optional[int]=False , __a : int=False , __a : int=1.0 , __a : Dict=0.2 , __a : Tuple=1 , __a : Optional[Any]=0 , __a : List[Any]=2 , __a : Any=False , **__a : Any , ) -> Any: """simple docstring""" __lowercase : int = vocab_size __lowercase : List[Any] = max_position_embeddings __lowercase : Tuple = d_model __lowercase : str = encoder_ffn_dim __lowercase : List[str] = encoder_layers __lowercase : int = encoder_attention_heads __lowercase : List[Any] = decoder_ffn_dim __lowercase : int = decoder_layers __lowercase : Optional[int] = decoder_attention_heads __lowercase : Union[str, Any] = dropout __lowercase : str = attention_dropout __lowercase : Any = activation_dropout __lowercase : List[Any] = activation_function __lowercase : List[str] = init_std __lowercase : Optional[int] = encoder_layerdrop __lowercase : str = decoder_layerdrop __lowercase : Dict = use_cache __lowercase : Optional[Any] = encoder_layers __lowercase : str = scale_embedding # scale factor will be sqrt(d_model) if True __lowercase : List[Any] = router_z_loss_coef __lowercase : Tuple = router_aux_loss_coef __lowercase : str = decoder_sparse_step __lowercase : Any = encoder_sparse_step __lowercase : str = num_experts __lowercase : List[Any] = expert_capacity __lowercase : int = router_bias if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(F"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" ) __lowercase : Optional[int] = router_dtype __lowercase : Any = router_ignore_padding_tokens __lowercase : Optional[Any] = batch_prioritized_routing __lowercase : str = second_expert_policy __lowercase : List[str] = normalize_router_prob_before_dropping __lowercase : List[Any] = moe_eval_capacity_token_fraction __lowercase : List[str] = moe_token_dropout __lowercase : Optional[Any] = output_router_logits super().__init__( pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , is_encoder_decoder=__a , decoder_start_token_id=__a , **__a , )
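# Minimal usage sketch for the configuration class above (upstream it is
# exposed as `NllbMoeConfig`; the values checked are the defaults visible in
# the signature above, not new claims):
#
#   config = NllbMoeConfig(num_experts=8, encoder_sparse_step=2)
#   assert config.d_model == 1024 and config.router_dtype == "float32"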
import argparse from collections import defaultdict import yaml lowerCamelCase : Any = '''docs/source/en/_toctree.yml''' def snake_case_ ( lowerCAmelCase_ : Union[str, Any] ): __lowercase : Any = defaultdict(lowerCAmelCase_ ) __lowercase : Optional[int] = [] __lowercase : Union[str, Any] = [] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} ) else: new_doc_list.append(lowerCAmelCase_ ) __lowercase : Optional[int] = new_doc_list __lowercase : str = [key for key, value in counts.items() if value > 1] __lowercase : Optional[Any] = [] for duplicate_key in duplicates: __lowercase : Tuple = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} ) if len(lowerCAmelCase_ ) > 1: raise ValueError( F"{duplicate_key} is present several times in the documentation table of content at " """`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """ """others.""" ) # Only add this once new_doc.append({"""local""": duplicate_key, """title""": titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] ) __lowercase : Dict = sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : s["title"].lower() ) # "overview" gets special treatment and is always first if len(lowerCAmelCase_ ) > 1: raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" ) overview_doc.extend(lowerCAmelCase_ ) # Sort return overview_doc def snake_case_ ( lowerCAmelCase_ : Any=False ): with open(lowerCAmelCase_ , encoding="""utf-8""" ) as f: __lowercase : Optional[int] = yaml.safe_load(f.read() ) # Get to the API doc __lowercase : str = 0 while content[api_idx]["title"] != "API": api_idx += 1 __lowercase : Dict = content[api_idx]["""sections"""] # Then to the model doc __lowercase : Dict = 0 while api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 __lowercase : int = api_doc[scheduler_idx]["""sections"""] __lowercase : Optional[int] = clean_doc_toc(lowerCAmelCase_ ) __lowercase : int = False if new_scheduler_doc != scheduler_doc: __lowercase : Dict = True if overwrite: __lowercase : int = new_scheduler_doc if diff: if overwrite: __lowercase : Optional[Any] = api_doc with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as f: f.write(yaml.dump(lowerCAmelCase_ , allow_unicode=lowerCAmelCase_ ) ) else: raise ValueError( """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) def snake_case_ ( lowerCAmelCase_ : Dict=False ): with open(lowerCAmelCase_ , encoding="""utf-8""" ) as f: __lowercase : List[str] = yaml.safe_load(f.read() ) # Get to the API doc __lowercase : Any = 0 while content[api_idx]["title"] != "API": api_idx += 1 __lowercase : Optional[Any] = content[api_idx]["""sections"""] # Then to the model doc __lowercase : Tuple = 0 while api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 __lowercase : Tuple = False __lowercase : Union[str, Any] = api_doc[pipeline_idx]["""sections"""] __lowercase : Optional[int] = [] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: __lowercase : str = pipeline_doc["""section"""] __lowercase : Optional[Any] = clean_doc_toc(lowerCAmelCase_ ) if overwrite: __lowercase : Union[str, Any] = new_sub_pipeline_doc new_pipeline_docs.append(lowerCAmelCase_ ) # sort overall pipeline doc 
__lowercase : int = clean_doc_toc(lowerCAmelCase_ ) if new_pipeline_docs != pipeline_docs: __lowercase : List[Any] = True if overwrite: __lowercase : Optional[Any] = new_pipeline_docs if diff: if overwrite: __lowercase : List[str] = api_doc with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as f: f.write(yaml.dump(lowerCAmelCase_ , allow_unicode=lowerCAmelCase_ ) ) else: raise ValueError( """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) if __name__ == "__main__": lowerCamelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') lowerCamelCase : Dict = parser.parse_args() check_scheduler_doc(args.fix_and_overwrite) check_pipeline_doc(args.fix_and_overwrite)
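# Behaviour sketch for the first helper above (named `clean_doc_toc` upstream;
# the example data is hypothetical): duplicates are merged, the "overview"
# entry is pulled to the front, and the remaining entries are sorted by title.
#
#   clean_doc_toc([
#       {"local": "b", "title": "Beta"},
#       {"local": "overview", "title": "Overview"},
#       {"local": "a", "title": "Alpha"},
#   ])
#   # -> [{"local": "overview", "title": "Overview"},
#   #     {"local": "a", "title": "Alpha"},
#   #     {"local": "b", "title": "Beta"}]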
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCamelCase : Optional[Any] = { '''configuration_poolformer''': [ '''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PoolFormerConfig''', '''PoolFormerOnnxConfig''', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : int = ['''PoolFormerFeatureExtractor'''] lowerCamelCase : Union[str, Any] = ['''PoolFormerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : List[str] = [ '''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''PoolFormerForImageClassification''', '''PoolFormerModel''', '''PoolFormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_poolformer import ( POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, PoolFormerConfig, PoolFormerOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_poolformer import PoolFormerFeatureExtractor from .image_processing_poolformer import PoolFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_poolformer import ( POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, PoolFormerForImageClassification, PoolFormerModel, PoolFormerPreTrainedModel, ) else: import sys lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax lowerCamelCase : List[Any] = logging.get_logger(__name__) @add_end_docstrings(__a ) class lowerCAmelCase ( __a ): '''simple docstring''' def __init__( self : Any , **__a : Tuple ) -> Any: """simple docstring""" super().__init__(**__a ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : List[str] , __a : Union[str, List[str], "Image", List["Image"]] , **__a : Optional[Any] ) -> Optional[Any]: """simple docstring""" return super().__call__(__a , **__a ) def lowerCAmelCase ( self : Optional[Any] , **__a : Optional[int] ) -> List[str]: """simple docstring""" __lowercase : List[Any] = {} if "candidate_labels" in kwargs: __lowercase : Optional[Any] = kwargs["""candidate_labels"""] if "hypothesis_template" in kwargs: __lowercase : Union[str, Any] = kwargs["""hypothesis_template"""] return preprocess_params, {}, {} def lowerCAmelCase ( self : Optional[Any] , __a : Optional[Any] , __a : Tuple=None , __a : List[Any]="This is a photo of {}." ) -> Optional[Any]: """simple docstring""" __lowercase : Union[str, Any] = load_image(__a ) __lowercase : Tuple = self.image_processor(images=[image] , return_tensors=self.framework ) __lowercase : Optional[int] = candidate_labels __lowercase : Tuple = [hypothesis_template.format(__a ) for x in candidate_labels] __lowercase : int = self.tokenizer(__a , return_tensors=self.framework , padding=__a ) __lowercase : List[Any] = [text_inputs] return inputs def lowerCAmelCase ( self : Dict , __a : Tuple ) -> Optional[int]: """simple docstring""" __lowercase : Optional[int] = model_inputs.pop("""candidate_labels""" ) __lowercase : Optional[int] = model_inputs.pop("""text_inputs""" ) if isinstance(text_inputs[0] , __a ): __lowercase : Any = text_inputs[0] else: # Batching case. __lowercase : int = text_inputs[0][0] __lowercase : Dict = self.model(**__a , **__a ) __lowercase : List[Any] = { """candidate_labels""": candidate_labels, """logits""": outputs.logits_per_image, } return model_outputs def lowerCAmelCase ( self : str , __a : Dict ) -> List[Any]: """simple docstring""" __lowercase : List[Any] = model_outputs.pop("""candidate_labels""" ) __lowercase : Union[str, Any] = model_outputs["""logits"""][0] if self.framework == "pt": __lowercase : Union[str, Any] = logits.softmax(dim=-1 ).squeeze(-1 ) __lowercase : Optional[Any] = probs.tolist() if not isinstance(__a , __a ): __lowercase : List[str] = [scores] elif self.framework == "tf": __lowercase : Union[str, Any] = stable_softmax(__a , axis=-1 ) __lowercase : Tuple = probs.numpy().tolist() else: raise ValueError(F"Unsupported framework: {self.framework}" ) __lowercase : List[str] = [ {"""score""": score, """label""": candidate_label} for score, candidate_label in sorted(zip(__a , __a ) , key=lambda __a : -x[0] ) ] return result
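# Typical invocation of the pipeline above (a sketch; the checkpoint, file
# name and scores are illustrative assumptions, not original content):
#
#   from transformers import pipeline
#
#   classifier = pipeline(
#       "zero-shot-image-classification", model="openai/clip-vit-base-patch32"
#   )
#   classifier(
#       "cat.png",
#       candidate_labels=["cat", "dog"],
#       hypothesis_template="This is a photo of {}.",
#   )
#   # -> [{"score": 0.99, "label": "cat"}, {"score": 0.01, "label": "dog"}]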
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factors of n in ascending order."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
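# Illustrative calls (the factorizations are arithmetic facts, not from the
# original file):
#
#   prime_factors(360)  # -> [2, 2, 2, 3, 3, 5]
#   prime_factors(97)   # -> [97]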
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    """Count how many words in words.txt have a triangular letter-value sum."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    word_values = [sum(ord(x) - 64 for x in word) for word in words]
    triangle_words = [value for value in word_values if value in TRIANGULAR_NUMBERS]
    return len(triangle_words)


if __name__ == "__main__":
    print(solution())
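# Worked example (not in the original file): "SKY" scores 19 + 11 + 25 = 55,
# which is the 10th triangular number (10 * 11 / 2), so it counts as a
# triangle word.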
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
def longest_common_subsequence(x: str, y: str):
    """Return the length of the longest common subsequence of x and y,
    together with one such subsequence, via dynamic programming."""
    assert x is not None
    assert y is not None
    m = len(x)
    n = len(y)
    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"
    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
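# The table above is equivalent to the standard LCS recurrence (stated here
# for reference, not original content), with l[i][j] the LCS length of
# x[:i] and y[:j]:
#
#   l[i][j] = l[i-1][j-1] + 1            if x[i-1] == y[j-1]
#   l[i][j] = max(l[i-1][j], l[i][j-1])  otherwise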
def abbr(a: str, b: str) -> bool:
    """Return True if string `a` can be turned into abbreviation `b` by
    upper-casing some of its lowercase letters and deleting the rest."""
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
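# Example of the rule the DP encodes (illustrative, not original content):
#   abbr("daBcd", "ABC") -> True
# Delete the leading "d", upper-case "a", keep "B", upper-case "c", and
# delete the trailing "d".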
import argparse
import hashlib  # hashlib is only used inside the Test function
import struct


class SHA1Hash:
    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    data = b"Test String"
    assert SHA1Hash(data).final_hash() == hashlib.sha1(data).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
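# Padding check for the implementation above (illustrative arithmetic, not
# original content): for the 11-byte input b"Test String", padding() appends
# 0x80, then 44 zero bytes, then the bit length 88 as a big-endian 64-bit
# integer, producing exactly one 64-byte block.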
from scipy.stats import spearmanr import datasets lowerCamelCase : List[str] = ''' The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. ''' lowerCamelCase : List[str] = ''' Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {\'spearmanr\': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results[\'spearmanr\']) -0.7 >>> print(round(results[\'spearmanr_pvalue\'], 2)) 0.19 ''' lowerCamelCase : Union[str, Any] = r'''\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase ( datasets.Metric ): '''simple docstring''' def lowerCAmelCase ( self : Tuple ) -> Any: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""float""" ), """references""": datasets.Value("""float""" ), } ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , ) def lowerCAmelCase ( self : List[Any] , __a : str , __a : Any , __a : Optional[int]=False ) -> List[str]: """simple docstring""" __lowercase : Optional[Any] = spearmanr(__a , __a ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features lowerCamelCase : Any = logging.get_logger(__name__) lowerCamelCase : Optional[Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) lowerCamelCase : List[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class lowerCAmelCase : '''simple docstring''' _A : str = field( default=__a , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(__a )} ) _A : str = field( default=__a , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} ) _A : int = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) _A : int = field( default=128 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , ) _A : int = field( default=64 , metadata={ '''help''': ( '''The maximum number of tokens for the question. Questions longer than this will ''' '''be truncated to this length.''' ) } , ) _A : int = field( default=30 , metadata={ '''help''': ( '''The maximum length of an answer that can be generated. This is needed because the start ''' '''and end predictions are not conditioned on one another.''' ) } , ) _A : bool = field( default=__a , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) _A : bool = field( default=__a , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} ) _A : float = field( default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} ) _A : int = field( default=20 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} ) _A : int = field( default=0 , metadata={ '''help''': ( '''language id of input for language-specific xlm models (see''' ''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)''' ) } , ) _A : int = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} ) class lowerCAmelCase ( __a ): '''simple docstring''' _A : Optional[int] = '''train''' _A : List[str] = '''dev''' class lowerCAmelCase ( __a ): '''simple docstring''' _A : SquadDataTrainingArguments _A : List[SquadFeatures] _A : Split _A : bool def __init__( self : int , __a : SquadDataTrainingArguments , __a : PreTrainedTokenizer , __a : Optional[int] = None , __a : Union[str, Split] = Split.train , __a : Optional[bool] = False , __a : Optional[str] = None , __a : Optional[str] = "pt" , ) -> List[Any]: """simple docstring""" __lowercase : List[Any] = args __lowercase : List[str] = is_language_sensitive __lowercase : List[Any] = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(__a , __a ): try: __lowercase : Any = Split[mode] except KeyError: raise KeyError("""mode is not a valid split name""" ) __lowercase : int = mode # Load data features from cache or dataset file __lowercase : 
Optional[int] = """v2""" if args.version_2_with_negative else """v1""" __lowercase : List[Any] = os.path.join( cache_dir if cache_dir is not None else args.data_dir , F"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. __lowercase : Any = cached_features_file + """.lock""" with FileLock(__a ): if os.path.exists(__a ) and not args.overwrite_cache: __lowercase : Tuple = time.time() __lowercase : Any = torch.load(__a ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. __lowercase : Optional[int] = self.old_features["""features"""] __lowercase : List[Any] = self.old_features.get("""dataset""" , __a ) __lowercase : Dict = self.old_features.get("""examples""" , __a ) logger.info( F"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( F"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in" """ future run""" ) else: if mode == Split.dev: __lowercase : str = self.processor.get_dev_examples(args.data_dir ) else: __lowercase : Tuple = self.processor.get_train_examples(args.data_dir ) __lowercase , __lowercase : Union[str, Any] = squad_convert_examples_to_features( examples=self.examples , tokenizer=__a , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=__a , ) __lowercase : List[str] = time.time() torch.save( {"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples} , __a , ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. 
logger.info( F"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" ) def __len__( self : Optional[Any] ) -> List[str]: """simple docstring""" return len(self.features ) def __getitem__( self : str , __a : List[Any] ) -> Dict[str, torch.Tensor]: """simple docstring""" __lowercase : str = self.features[i] __lowercase : List[str] = torch.tensor(feature.input_ids , dtype=torch.long ) __lowercase : Tuple = torch.tensor(feature.attention_mask , dtype=torch.long ) __lowercase : int = torch.tensor(feature.token_type_ids , dtype=torch.long ) __lowercase : Union[str, Any] = torch.tensor(feature.cls_index , dtype=torch.long ) __lowercase : Optional[int] = torch.tensor(feature.p_mask , dtype=torch.float ) __lowercase : str = torch.tensor(feature.is_impossible , dtype=torch.float ) __lowercase : List[str] = { """input_ids""": input_ids, """attention_mask""": attention_mask, """token_type_ids""": token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} ) if self.args.version_2_with_negative: inputs.update({"""is_impossible""": is_impossible} ) if self.is_language_sensitive: inputs.update({"""langs""": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: __lowercase : List[str] = torch.tensor(feature.start_position , dtype=torch.long ) __lowercase : int = torch.tensor(feature.end_position , dtype=torch.long ) inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} ) return inputs
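# Construction sketch for the dataset class above (upstream these are named
# `SquadDataTrainingArguments`, `SquadDataset` and `Split`; the paths and the
# tokenizer are assumptions, not original content):
#
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="squad_data")
#   dataset = SquadDataset(args, tokenizer, mode=Split.dev)
#   features = dataset[0]  # dict with input_ids / attention_mask / token_type_ids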
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    """Knuth-Morris-Pratt search: return True if `pattern` occurs in `text`."""
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """Compute the KMP failure (border) array for `pattern`."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
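# Reading the failure array (explanatory note, not original content):
# failure[k] is the length of the longest proper prefix of pattern[: k + 1]
# that is also a suffix of it. For "aabaabaaa" the final entry is 2 because
# the longest border of the whole string is "aa".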
def twos_complement(number: int) -> str:
    """Return the two's complement binary string of a negative integer."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
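# Worked example (not original content): twos_complement(-5) -> "0b1011".
# bin(-5) has 3 magnitude bits, 5 - 2**3 = -3 contributes "11", and the
# leading "1" plus zero padding yields the 4-bit two's complement 1011.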
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging lowerCamelCase : Optional[Any] = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCAmelCase ( __a ): '''simple docstring''' _A : List[str] = ['''pixel_values'''] def __init__( self : Any , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : bool = True , __a : Dict[str, int] = None , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : bool = True , **__a : str , ) -> None: """simple docstring""" super().__init__(**__a ) __lowercase : Dict = size if size is not None else {"""shortest_edge""": 224} __lowercase : Union[str, Any] = get_size_dict(__a , default_to_square=__a ) __lowercase : int = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __lowercase : Any = get_size_dict(__a , default_to_square=__a , param_name="""crop_size""" ) __lowercase : Optional[int] = do_resize __lowercase : Union[str, Any] = size __lowercase : List[Any] = resample __lowercase : Any = do_center_crop __lowercase : Dict = crop_size __lowercase : int = do_rescale __lowercase : Tuple = rescale_factor __lowercase : List[Any] = do_normalize __lowercase : Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __lowercase : int = image_std if image_std is not None else OPENAI_CLIP_STD __lowercase : Union[str, Any] = do_convert_rgb def lowerCAmelCase ( self : Union[str, Any] , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[Any] , ) -> np.ndarray: """simple docstring""" __lowercase : Dict = get_size_dict(__a , default_to_square=__a ) if "shortest_edge" not in size: raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" ) __lowercase : str = get_resize_output_image_size(__a , size=size["""shortest_edge"""] , default_to_square=__a ) return resize(__a , size=__a , resample=__a , data_format=__a , **__a ) def lowerCAmelCase ( self : Tuple , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Any , ) -> np.ndarray: """simple docstring""" __lowercase : Tuple = get_size_dict(__a ) if "height" not in size or "width" not in size: raise ValueError(F"The `size` parameter must contain the keys (height, width). 
Got {size.keys()}" ) return center_crop(__a , size=(size["""height"""], size["""width"""]) , data_format=__a , **__a ) def lowerCAmelCase ( self : Tuple , __a : np.ndarray , __a : Union[int, float] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[Any] , ) -> List[str]: """simple docstring""" return rescale(__a , scale=__a , data_format=__a , **__a ) def lowerCAmelCase ( self : Optional[int] , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[str] , ) -> np.ndarray: """simple docstring""" return normalize(__a , mean=__a , std=__a , data_format=__a , **__a ) def lowerCAmelCase ( self : Optional[int] , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : int = None , __a : bool = None , __a : float = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : bool = None , __a : Optional[Union[str, TensorType]] = None , __a : Optional[ChannelDimension] = ChannelDimension.FIRST , **__a : List[Any] , ) -> PIL.Image.Image: """simple docstring""" __lowercase : List[Any] = do_resize if do_resize is not None else self.do_resize __lowercase : Dict = size if size is not None else self.size __lowercase : Tuple = get_size_dict(__a , param_name="""size""" , default_to_square=__a ) __lowercase : int = resample if resample is not None else self.resample __lowercase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop __lowercase : List[Any] = crop_size if crop_size is not None else self.crop_size __lowercase : List[str] = get_size_dict(__a , param_name="""crop_size""" , default_to_square=__a ) __lowercase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __lowercase : str = rescale_factor if rescale_factor is not None else self.rescale_factor __lowercase : Dict = do_normalize if do_normalize is not None else self.do_normalize __lowercase : Tuple = image_mean if image_mean is not None else self.image_mean __lowercase : str = image_std if image_std is not None else self.image_std __lowercase : str = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __lowercase : Union[str, Any] = make_list_of_images(__a ) if not valid_images(__a ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __lowercase : Union[str, Any] = [convert_to_rgb(__a ) for image in images] # All transformations expect numpy arrays. 
__lowercase : Any = [to_numpy_array(__a ) for image in images] if do_resize: __lowercase : str = [self.resize(image=__a , size=__a , resample=__a ) for image in images] if do_center_crop: __lowercase : str = [self.center_crop(image=__a , size=__a ) for image in images] if do_rescale: __lowercase : Dict = [self.rescale(image=__a , scale=__a ) for image in images] if do_normalize: __lowercase : Optional[Any] = [self.normalize(image=__a , mean=__a , std=__a ) for image in images] __lowercase : Any = [to_channel_dimension_format(__a , __a ) for image in images] __lowercase : Optional[int] = {"""pixel_values""": images} return BatchFeature(data=__a , tensor_type=__a )
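# Illustrative sketch (not part of the original file): the transform order the
# processor above applies -- shortest-edge resize, center crop, rescale, normalize,
# channels-first -- written out with plain NumPy/PIL. The 224 sizes and the
# OPENAI_CLIP_* constants mirror the defaults above; everything else is assumed
# for demonstration only.
import numpy as np
from PIL import Image

OPENAI_CLIP_MEAN = (0.48145466, 0.4578275, 0.40821073)
OPENAI_CLIP_STD = (0.26862954, 0.26130258, 0.27577711)

def clip_style_preprocess(image: Image.Image) -> np.ndarray:
    # 1. resize so the shortest edge is 224, keeping the aspect ratio
    w, h = image.size
    scale = 224 / min(w, h)
    image = image.resize((round(w * scale), round(h * scale)), Image.Resampling.BICUBIC)
    # 2. center crop to 224 x 224
    w, h = image.size
    left, top = (w - 224) // 2, (h - 224) // 2
    image = image.crop((left, top, left + 224, top + 224))
    # 3. rescale to [0, 1], then normalize per channel
    arr = np.asarray(image.convert("RGB"), dtype=np.float32) / 255.0
    arr = (arr - OPENAI_CLIP_MEAN) / OPENAI_CLIP_STD
    # 4. HWC -> CHW, matching ChannelDimension.FIRST above
    return arr.transpose(2, 0, 1)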
import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase ( self : Any ) -> List[Any]: """simple docstring""" __lowercase : Dict = { """task_specific_params""": { """summarization""": {"""length_penalty""": 1.0, """max_length""": 128, """min_length""": 12, """num_beams""": 4}, """summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 142, """min_length""": 56, """num_beams""": 4}, """summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6}, } } __lowercase : Optional[int] = { """task_specific_params.summarization.length_penalty""": 1.0, """task_specific_params.summarization.max_length""": 128, """task_specific_params.summarization.min_length""": 12, """task_specific_params.summarization.num_beams""": 4, """task_specific_params.summarization_cnn.length_penalty""": 2.0, """task_specific_params.summarization_cnn.max_length""": 142, """task_specific_params.summarization_cnn.min_length""": 56, """task_specific_params.summarization_cnn.num_beams""": 4, """task_specific_params.summarization_xsum.length_penalty""": 1.0, """task_specific_params.summarization_xsum.max_length""": 62, """task_specific_params.summarization_xsum.min_length""": 11, """task_specific_params.summarization_xsum.num_beams""": 6, } self.assertEqual(flatten_dict(__a ) , __a ) def lowerCAmelCase ( self : int ) -> Optional[Any]: """simple docstring""" __lowercase : Union[str, Any] = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(__a ) , x.transpose() ) ) __lowercase : Dict = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(__a , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def lowerCAmelCase ( self : int ) -> List[Any]: """simple docstring""" __lowercase : Optional[Any] = np.random.randn(3 , 4 ) __lowercase : Dict = torch.tensor(__a ) self.assertTrue(np.allclose(transpose(__a ) , transpose(__a ).numpy() ) ) __lowercase : Optional[int] = np.random.randn(3 , 4 , 5 ) __lowercase : Optional[int] = torch.tensor(__a ) self.assertTrue(np.allclose(transpose(__a , axes=(1, 2, 0) ) , transpose(__a , axes=(1, 2, 0) ).numpy() ) ) @require_tf def lowerCAmelCase ( self : str ) -> List[str]: """simple docstring""" __lowercase : Union[str, Any] = np.random.randn(3 , 4 ) __lowercase : int = tf.constant(__a ) self.assertTrue(np.allclose(transpose(__a ) , transpose(__a ).numpy() ) ) __lowercase : Optional[Any] = np.random.randn(3 , 4 , 5 ) __lowercase : str = tf.constant(__a ) self.assertTrue(np.allclose(transpose(__a , axes=(1, 2, 0) ) , transpose(__a , axes=(1, 2, 0) ).numpy() ) ) @require_flax def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" __lowercase : Any = np.random.randn(3 , 4 ) __lowercase : Any = jnp.array(__a ) self.assertTrue(np.allclose(transpose(__a ) , np.asarray(transpose(__a ) ) ) ) __lowercase : Any = np.random.randn(3 , 4 , 5 ) __lowercase : Dict = jnp.array(__a ) self.assertTrue(np.allclose(transpose(__a , axes=(1, 2, 0) ) , np.asarray(transpose(__a , axes=(1, 2, 0) ) ) ) ) def lowerCAmelCase ( self : int ) -> Optional[int]: """simple docstring""" __lowercase 
: Optional[Any] = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(__a , (4, 3) ) , np.reshape(__a , (4, 3) ) ) ) __lowercase : Optional[int] = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(reshape(__a , (12, 5) ) , np.reshape(__a , (12, 5) ) ) ) @require_torch def lowerCAmelCase ( self : List[Any] ) -> Tuple: """simple docstring""" __lowercase : List[Any] = np.random.randn(3 , 4 ) __lowercase : str = torch.tensor(__a ) self.assertTrue(np.allclose(reshape(__a , (4, 3) ) , reshape(__a , (4, 3) ).numpy() ) ) __lowercase : int = np.random.randn(3 , 4 , 5 ) __lowercase : Dict = torch.tensor(__a ) self.assertTrue(np.allclose(reshape(__a , (12, 5) ) , reshape(__a , (12, 5) ).numpy() ) ) @require_tf def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" __lowercase : List[Any] = np.random.randn(3 , 4 ) __lowercase : Any = tf.constant(__a ) self.assertTrue(np.allclose(reshape(__a , (4, 3) ) , reshape(__a , (4, 3) ).numpy() ) ) __lowercase : Union[str, Any] = np.random.randn(3 , 4 , 5 ) __lowercase : List[Any] = tf.constant(__a ) self.assertTrue(np.allclose(reshape(__a , (12, 5) ) , reshape(__a , (12, 5) ).numpy() ) ) @require_flax def lowerCAmelCase ( self : Dict ) -> List[Any]: """simple docstring""" __lowercase : str = np.random.randn(3 , 4 ) __lowercase : Optional[int] = jnp.array(__a ) self.assertTrue(np.allclose(reshape(__a , (4, 3) ) , np.asarray(reshape(__a , (4, 3) ) ) ) ) __lowercase : Optional[int] = np.random.randn(3 , 4 , 5 ) __lowercase : Dict = jnp.array(__a ) self.assertTrue(np.allclose(reshape(__a , (12, 5) ) , np.asarray(reshape(__a , (12, 5) ) ) ) ) def lowerCAmelCase ( self : Dict ) -> Union[str, Any]: """simple docstring""" __lowercase : List[Any] = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(__a ) , np.squeeze(__a ) ) ) __lowercase : Union[str, Any] = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(__a , axis=2 ) , np.squeeze(__a , axis=2 ) ) ) @require_torch def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]: """simple docstring""" __lowercase : List[str] = np.random.randn(1 , 3 , 4 ) __lowercase : int = torch.tensor(__a ) self.assertTrue(np.allclose(squeeze(__a ) , squeeze(__a ).numpy() ) ) __lowercase : List[str] = np.random.randn(1 , 4 , 1 , 5 ) __lowercase : Optional[Any] = torch.tensor(__a ) self.assertTrue(np.allclose(squeeze(__a , axis=2 ) , squeeze(__a , axis=2 ).numpy() ) ) @require_tf def lowerCAmelCase ( self : List[str] ) -> str: """simple docstring""" __lowercase : int = np.random.randn(1 , 3 , 4 ) __lowercase : Tuple = tf.constant(__a ) self.assertTrue(np.allclose(squeeze(__a ) , squeeze(__a ).numpy() ) ) __lowercase : Tuple = np.random.randn(1 , 4 , 1 , 5 ) __lowercase : Any = tf.constant(__a ) self.assertTrue(np.allclose(squeeze(__a , axis=2 ) , squeeze(__a , axis=2 ).numpy() ) ) @require_flax def lowerCAmelCase ( self : str ) -> int: """simple docstring""" __lowercase : Union[str, Any] = np.random.randn(1 , 3 , 4 ) __lowercase : int = jnp.array(__a ) self.assertTrue(np.allclose(squeeze(__a ) , np.asarray(squeeze(__a ) ) ) ) __lowercase : Tuple = np.random.randn(1 , 4 , 1 , 5 ) __lowercase : Any = jnp.array(__a ) self.assertTrue(np.allclose(squeeze(__a , axis=2 ) , np.asarray(squeeze(__a , axis=2 ) ) ) ) def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" __lowercase : Any = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(__a , axis=1 ) , np.expand_dims(__a , axis=1 ) ) ) @require_torch def lowerCAmelCase ( self : 
str ) -> Optional[int]: """simple docstring""" __lowercase : str = np.random.randn(3 , 4 ) __lowercase : Optional[int] = torch.tensor(__a ) self.assertTrue(np.allclose(expand_dims(__a , axis=1 ) , expand_dims(__a , axis=1 ).numpy() ) ) @require_tf def lowerCAmelCase ( self : str ) -> int: """simple docstring""" __lowercase : str = np.random.randn(3 , 4 ) __lowercase : Tuple = tf.constant(__a ) self.assertTrue(np.allclose(expand_dims(__a , axis=1 ) , expand_dims(__a , axis=1 ).numpy() ) ) @require_flax def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" __lowercase : Any = np.random.randn(3 , 4 ) __lowercase : Union[str, Any] = jnp.array(__a ) self.assertTrue(np.allclose(expand_dims(__a , axis=1 ) , np.asarray(expand_dims(__a , axis=1 ) ) ) )
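# Illustrative sketch (not part of the original tests): the framework-dispatch
# pattern that utilities like `transpose` above implement -- inspect the array's
# type and call the matching backend op. Shown for NumPy and PyTorch only; the
# real utility also handles TensorFlow and JAX the same way.
import numpy as np

def transpose_dispatch(array, axes=None):
    if isinstance(array, np.ndarray):
        return np.transpose(array, axes=axes)
    try:
        import torch
        if isinstance(array, torch.Tensor):
            return array.T if axes is None else array.permute(*axes)
    except ImportError:
        pass
    raise ValueError(f"Type not supported for transpose: {type(array)}.")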
import argparse import os import torch from transformers import FlavaImageCodebook, FlavaImageCodebookConfig def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str ): __lowercase : Tuple = s.rsplit(lowerCAmelCase_ , lowerCAmelCase_ ) return new.join(lowerCAmelCase_ ) def snake_case_ ( lowerCAmelCase_ : List[Any] ): # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() ) def snake_case_ ( lowerCAmelCase_ : int ): __lowercase : List[str] = {} __lowercase : Tuple = ["""group_1""", """group_2""", """group_3""", """group_4"""] for key, value in state_dict.items(): for group_key in group_keys: if group_key in key: __lowercase : List[str] = key.replace(F"{group_key}." , F"{group_key}.group." ) if "res_path" in key: __lowercase : List[Any] = key.replace("""res_path.""" , """res_path.path.""" ) if key.endswith(""".w""" ): __lowercase : Union[str, Any] = rreplace(lowerCAmelCase_ , """.w""" , """.weight""" , 1 ) if key.endswith(""".b""" ): __lowercase : Tuple = rreplace(lowerCAmelCase_ , """.b""" , """.bias""" , 1 ) __lowercase : Dict = value.float() return upgrade @torch.no_grad() def snake_case_ ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Tuple=True ): from dall_e import Encoder __lowercase : Any = Encoder() if os.path.exists(lowerCAmelCase_ ): __lowercase : List[Any] = torch.load(lowerCAmelCase_ ) else: __lowercase : List[Any] = torch.hub.load_state_dict_from_url(lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): __lowercase : int = ckpt.state_dict() encoder.load_state_dict(lowerCAmelCase_ ) if config_path is not None: __lowercase : Optional[int] = FlavaImageCodebookConfig.from_pretrained(lowerCAmelCase_ ) else: __lowercase : List[str] = FlavaImageCodebookConfig() __lowercase : Optional[Any] = FlavaImageCodebook(lowerCAmelCase_ ).eval() __lowercase : List[Any] = encoder.state_dict() __lowercase : Union[str, Any] = upgrade_state_dict(lowerCAmelCase_ ) hf_model.load_state_dict(lowerCAmelCase_ ) __lowercase : Dict = hf_model.state_dict() __lowercase : Tuple = count_parameters(lowerCAmelCase_ ) __lowercase : Tuple = count_parameters(lowerCAmelCase_ ) assert torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-3 ) if save_checkpoint: hf_model.save_pretrained(lowerCAmelCase_ ) else: return hf_state_dict if __name__ == "__main__": lowerCamelCase : Dict = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') lowerCamelCase : Union[str, Any] = parser.parse_args() convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
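# Illustrative sketch (not part of the original script): the right-replace helper
# used above, written with named parameters for clarity (the names are assumed).
# It replaces the last `occurrence` matches of `old` in `s` by splitting from the
# right and re-joining with `new`.
def rreplace_example(s: str, old: str, new: str, occurrence: int) -> str:
    parts = s.rsplit(old, occurrence)
    return new.join(parts)

assert rreplace_example("group_1.res_path.w", ".w", ".weight", 1) == "group_1.res_path.weight"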
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" __lowercase : Dict = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" ) __lowercase : List[str] = tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" __lowercase : Optional[Any] = model(__a )["""last_hidden_state"""] __lowercase : Any = tf.TensorShape((1, 10, 768) ) self.assertEqual(output.shape , __a ) # compare the actual values for a slice. __lowercase : Dict = tf.convert_to_tensor( [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging lowerCamelCase : Tuple = logging.get_logger(__name__) logging.set_verbosity_info() def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : str ): if "xprophetnet" in prophetnet_checkpoint_path: __lowercase : List[str] = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase_ ) __lowercase , __lowercase : int = XLMProphetNetForConditionalGeneration.from_pretrained( lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ ) else: __lowercase : List[Any] = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase_ ) __lowercase , __lowercase : Optional[Any] = ProphetNetForConditionalGeneration.from_pretrained( lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ ) __lowercase : List[str] = ["""key_proj""", """value_proj""", """query_proj"""] __lowercase : Optional[int] = { """self_attn""": """ngram_self_attn""", """cross_attn""": """encoder_attn""", """cross_attn_layer_norm""": """encoder_attn_layer_norm""", """feed_forward_layer_norm""": """final_layer_norm""", """feed_forward""": """""", """intermediate""": """fc1""", """output""": """fc2""", """key_proj""": """k_proj""", """query_proj""": """q_proj""", """value_proj""": """v_proj""", """word_embeddings""": """embed_tokens""", """embeddings_layer_norm""": """emb_layer_norm""", """relative_pos_embeddings""": """relative_linear""", """ngram_embeddings""": """ngram_input_embed""", """position_embeddings""": """embed_positions""", } for key in loading_info["missing_keys"]: __lowercase : Tuple = key.split(""".""" ) if attributes[0] == "lm_head": __lowercase : str = prophet __lowercase : List[str] = prophet_old else: __lowercase : Tuple = prophet.prophetnet __lowercase : Union[str, Any] = prophet_old.model __lowercase : Optional[Any] = False for attribute in attributes: if attribute in mapping: __lowercase : Optional[int] = mapping[attribute] if not hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) > 0: __lowercase : str = attribute elif hasattr(lowerCAmelCase_ , lowerCAmelCase_ ): __lowercase : List[Any] = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" __lowercase : Any = old_model.weight logger.info(F"{attribute} is initialized." ) __lowercase : Any = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
__lowercase : Dict = old_model.bias logger.info(F"{attribute} is initialized." ) __lowercase : int = True break elif attribute in special_keys and hasattr(lowerCAmelCase_ , """in_proj_weight""" ): __lowercase : Dict = old_model.in_proj_weight.shape[0] // 3 __lowercase : Tuple = getattr(lowerCAmelCase_ , lowerCAmelCase_ ) assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": __lowercase : Union[str, Any] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) __lowercase : int = nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": __lowercase : Any = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) __lowercase : List[Any] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": __lowercase : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) __lowercase : int = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) __lowercase : int = True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings." __lowercase : Optional[Any] = nn.Parameter(old_model.embed_positions.weight[:512, :] ) __lowercase : int = True break if attribute.isdigit(): __lowercase : Tuple = model[int(lowerCAmelCase_ )] __lowercase : int = old_model[int(lowerCAmelCase_ )] else: __lowercase : Union[str, Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ ) if old_attribute == "": __lowercase : int = old_model else: if not hasattr(lowerCAmelCase_ , lowerCAmelCase_ ): raise ValueError(F"{old_model} does not have {old_attribute}" ) __lowercase : List[Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ ) if not is_key_init: raise ValueError(F"{key} was not correctly initialized!" ) print(F"Saving model to {pytorch_dump_folder_path}" ) prophet.save_pretrained(lowerCAmelCase_ ) if __name__ == "__main__": lowerCamelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path to the official PyTorch dump.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCamelCase : Any = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
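# Illustrative sketch (not part of the conversion script above): how a fused
# attention `in_proj_weight` of shape (3 * embed_dim, embed_dim) is split into
# separate query/key/value projections, mirroring the `special_keys` branch.
# The sizes here are made up for demonstration.
import torch

embed_dim = 4
in_proj_weight = torch.randn(3 * embed_dim, embed_dim)
query_w = in_proj_weight[:embed_dim, :]
key_w = in_proj_weight[embed_dim : 2 * embed_dim, :]
value_w = in_proj_weight[2 * embed_dim :, :]
assert query_w.shape == key_w.shape == value_w.shape == (embed_dim, embed_dim)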
import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) lowerCamelCase : Any = logging.getLogger() def snake_case_ ( ): __lowercase : str = argparse.ArgumentParser() parser.add_argument("""-f""" ) __lowercase : Optional[Any] = parser.parse_args() return args.f class lowerCAmelCase ( __a ): '''simple docstring''' def lowerCAmelCase ( self : List[str] ) -> None: """simple docstring""" __lowercase : Union[str, Any] = logging.StreamHandler(sys.stdout ) logger.addHandler(__a ) def lowerCAmelCase ( self : List[Any] , __a : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" __lowercase : Union[str, Any] = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0 , """run_glue_deebert.py""" ) with patch.object(__a , """argv""" , __a ): __lowercase : Dict = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(__a , 0.666 ) @slow @require_torch_non_multi_gpu def lowerCAmelCase ( self : Any ) -> Optional[Any]: """simple docstring""" __lowercase : int = """ --model_type roberta --model_name_or_path roberta-base --task_name MRPC --do_train --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --max_seq_length 128 --per_gpu_eval_batch_size=1 --per_gpu_train_batch_size=8 --learning_rate 2e-4 --num_train_epochs 3 --overwrite_output_dir --seed 42 --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --save_steps 0 --overwrite_cache --eval_after_first_stage """.split() self.run_and_check(__a ) __lowercase : List[str] = """ --model_type roberta --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --task_name MRPC --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --max_seq_length 128 --eval_each_highway --eval_highway --overwrite_cache --per_gpu_eval_batch_size=1 """.split() self.run_and_check(__a ) __lowercase : str = """ --model_type roberta --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --task_name MRPC --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --max_seq_length 128 --early_exit_entropy 0.1 --eval_highway --overwrite_cache --per_gpu_eval_batch_size=1 """.split() self.run_and_check(__a )
def solution ( pence : int = 200 ): coins = [1, 2, 5, 10, 20, 50, 100, 200] number_of_ways = [0] * (pence + 1) number_of_ways[0] = 1 # base case: 1 way to make 0 pence for coin in coins: for i in range(coin , pence + 1 , 1 ): number_of_ways[i] += number_of_ways[i - coin] return number_of_ways[pence] if __name__ == "__main__": assert solution(2_00) == 7_36_82
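# Worked example (added for illustration): counting the ways to make 5 pence with
# the dynamic programme above. Iterating coins in the outer loop counts
# combinations rather than permutations: after coin 1 the table is
# [1, 1, 1, 1, 1, 1]; after coin 2 it is [1, 1, 2, 2, 3, 3]; after coin 5 the
# final entry becomes 4, for {5}, {2, 2, 1}, {2, 1, 1, 1} and {1, 1, 1, 1, 1}.
assert solution(5) == 4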
import inspect from typing import Callable, List, Optional, Union import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, WhisperForConditionalGeneration, WhisperProcessor, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging lowerCamelCase : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name class lowerCAmelCase ( __a ): '''simple docstring''' def __init__( self : List[str] , __a : WhisperForConditionalGeneration , __a : WhisperProcessor , __a : AutoencoderKL , __a : CLIPTextModel , __a : CLIPTokenizer , __a : UNetaDConditionModel , __a : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __a : StableDiffusionSafetyChecker , __a : CLIPImageProcessor , ) -> int: """simple docstring""" super().__init__() if safety_checker is None: logger.warning( F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" """ that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered""" """ results in services or applications open to the public. Both the diffusers team and Hugging Face""" """ strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling""" """ it only for use-cases that involve analyzing network behavior or auditing its results. For more""" """ information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" ) self.register_modules( speech_model=__a , speech_processor=__a , vae=__a , text_encoder=__a , tokenizer=__a , unet=__a , scheduler=__a , feature_extractor=__a , ) def lowerCAmelCase ( self : Dict , __a : Optional[Union[str, int]] = "auto" ) -> str: """simple docstring""" if slice_size == "auto": __lowercase : int = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__a ) def lowerCAmelCase ( self : Optional[int] ) -> List[Any]: """simple docstring""" self.enable_attention_slicing(__a ) @torch.no_grad() def __call__( self : List[str] , __a : str , __a : Optional[Any]=16000 , __a : int = 512 , __a : int = 512 , __a : int = 50 , __a : float = 7.5 , __a : Optional[Union[str, List[str]]] = None , __a : Optional[int] = 1 , __a : float = 0.0 , __a : Optional[torch.Generator] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[str] = "pil" , __a : bool = True , __a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __a : int = 1 , **__a : Tuple , ) -> Optional[int]: """simple docstring""" __lowercase : int = self.speech_processor.feature_extractor( __a , return_tensors="""pt""" , sampling_rate=__a ).input_features.to(self.device ) __lowercase : Tuple = self.speech_model.generate(__a , max_length=480000 ) __lowercase : Union[str, Any] = self.speech_processor.tokenizer.batch_decode(__a , skip_special_tokens=__a , normalize=__a )[ 0 ] if isinstance(__a , __a ): __lowercase : Tuple = 1 elif isinstance(__a , __a ): __lowercase : List[str] = len(__a ) else: raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(__a )}" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." 
) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__a , __a ) or callback_steps <= 0) ): raise ValueError( F"`callback_steps` has to be a positive integer but is {callback_steps} of type" F" {type(__a )}." ) # get prompt text embeddings __lowercase : Optional[Any] = self.tokenizer( __a , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) __lowercase : List[Any] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: __lowercase : Union[str, Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" F" {self.tokenizer.model_max_length} tokens: {removed_text}" ) __lowercase : Union[str, Any] = text_input_ids[:, : self.tokenizer.model_max_length] __lowercase : Any = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method __lowercase , __lowercase , __lowercase : Optional[int] = text_embeddings.shape __lowercase : str = text_embeddings.repeat(1 , __a , 1 ) __lowercase : Tuple = text_embeddings.view(bs_embed * num_images_per_prompt , __a , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. __lowercase : Optional[Any] = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: __lowercase : List[str] if negative_prompt is None: __lowercase : List[Any] = [""""""] * batch_size elif type(__a ) is not type(__a ): raise TypeError( F"`negative_prompt` should be the same type to `prompt`, but got {type(__a )} !=" F" {type(__a )}." ) elif isinstance(__a , __a ): __lowercase : str = [negative_prompt] elif batch_size != len(__a ): raise ValueError( F"`negative_prompt`: {negative_prompt} has batch size {len(__a )}, but `prompt`:" F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" """ the batch size of `prompt`.""" ) else: __lowercase : Optional[Any] = negative_prompt __lowercase : List[str] = text_input_ids.shape[-1] __lowercase : Optional[int] = self.tokenizer( __a , padding="""max_length""" , max_length=__a , truncation=__a , return_tensors="""pt""" , ) __lowercase : List[str] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method __lowercase : str = uncond_embeddings.shape[1] __lowercase : int = uncond_embeddings.repeat(1 , __a , 1 ) __lowercase : int = uncond_embeddings.view(batch_size * num_images_per_prompt , __a , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __lowercase : Any = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
__lowercase : int = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) __lowercase : List[str] = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps __lowercase : Optional[Any] = torch.randn(__a , generator=__a , device="""cpu""" , dtype=__a ).to( self.device ) else: __lowercase : List[Any] = torch.randn(__a , generator=__a , device=self.device , dtype=__a ) else: if latents.shape != latents_shape: raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" ) __lowercase : Any = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(__a ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand __lowercase : str = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler __lowercase : List[str] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __lowercase : List[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) __lowercase : Tuple = {} if accepts_eta: __lowercase : Any = eta for i, t in enumerate(self.progress_bar(__a ) ): # expand the latents if we are doing classifier free guidance __lowercase : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __lowercase : str = self.scheduler.scale_model_input(__a , __a ) # predict the noise residual __lowercase : Tuple = self.unet(__a , __a , encoder_hidden_states=__a ).sample # perform guidance if do_classifier_free_guidance: __lowercase , __lowercase : int = noise_pred.chunk(2 ) __lowercase : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 __lowercase : List[str] = self.scheduler.step(__a , __a , __a , **__a ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__a , __a , __a ) __lowercase : Union[str, Any] = 1 / 0.18215 * latents __lowercase : Optional[Any] = self.vae.decode(__a ).sample __lowercase : Dict = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 __lowercase : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": __lowercase : Tuple = self.numpy_to_pil(__a ) if not return_dict: return image return StableDiffusionPipelineOutput(images=__a , nsfw_content_detected=__a )
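# Illustrative sketch (not part of the original pipeline): the classifier-free
# guidance update used in the denoising loop above. The two tensors stand in for
# the unconditional and text-conditioned halves of the batched UNet output; the
# shapes and values are made up for demonstration.
import torch

guidance_scale = 7.5
noise_pred_uncond = torch.zeros(1, 4, 8, 8)
noise_pred_text = torch.ones(1, 4, 8, 8)
# guided prediction: uncond + w * (text - uncond); w > 1 pushes toward the prompt
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert torch.allclose(noise_pred, torch.full((1, 4, 8, 8), 7.5))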
import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class lowerCAmelCase : '''simple docstring''' def __init__( self : Optional[Any] , __a : Dict , __a : List[str]=None , __a : Optional[Any]=None , __a : Union[str, Any]=None , __a : int="resnet50" , __a : List[str]=3 , __a : Tuple=32 , __a : Dict=3 , __a : List[str]=True , __a : Union[str, Any]=True , ) -> Any: """simple docstring""" __lowercase : Optional[int] = parent __lowercase : List[str] = out_indices if out_indices is not None else [4] __lowercase : Optional[int] = stage_names __lowercase : Any = out_features __lowercase : Optional[Any] = backbone __lowercase : Optional[Any] = batch_size __lowercase : Union[str, Any] = image_size __lowercase : List[str] = num_channels __lowercase : str = use_pretrained_backbone __lowercase : str = is_training def lowerCAmelCase ( self : Dict ) -> Tuple: """simple docstring""" __lowercase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase : str = self.get_config() return config, pixel_values def lowerCAmelCase ( self : int ) -> str: """simple docstring""" return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def lowerCAmelCase ( self : Optional[int] , __a : Dict , __a : Any ) -> Dict: """simple docstring""" __lowercase : Dict = TimmBackbone(config=__a ) model.to(__a ) model.eval() with torch.no_grad(): __lowercase : Optional[Any] = model(__a ) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , ) def lowerCAmelCase ( self : Any ) -> int: """simple docstring""" __lowercase : Union[str, Any] = self.prepare_config_and_inputs() __lowercase , __lowercase : str = config_and_inputs __lowercase : List[str] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch @require_timm class lowerCAmelCase ( __a , __a , __a , unittest.TestCase ): '''simple docstring''' _A : List[Any] = (TimmBackbone,) if is_torch_available() else () _A : Dict = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {} _A : List[Any] = False _A : List[str] = False _A : Any = False _A : Optional[Any] = False def lowerCAmelCase ( self : Dict ) -> Optional[Any]: """simple docstring""" __lowercase : str = TimmBackboneModelTester(self ) __lowercase : Any = ConfigTester(self , config_class=__a , has_text_modality=__a ) def lowerCAmelCase ( self : Any ) -> str: """simple docstring""" self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() 
self.config_tester.check_config_arguments_init() def lowerCAmelCase ( self : str ) -> Tuple: """simple docstring""" __lowercase : Tuple = """resnet18""" __lowercase : Optional[int] = """microsoft/resnet-18""" __lowercase : Union[str, Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a ) __lowercase : Dict = AutoBackbone.from_pretrained(__a ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,) ) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] ) __lowercase : Union[str, Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a , out_indices=[1, 2, 3] ) __lowercase : Optional[Any] = AutoBackbone.from_pretrained(__a , out_indices=[1, 2, 3] ) self.assertEqual(timm_model.out_indices , transformers_model.out_indices ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) @unittest.skip("""TimmBackbone doesn't support feed forward chunking""" ) def lowerCAmelCase ( self : List[Any] ) -> Any: """simple docstring""" pass @unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" ) def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" pass @unittest.skip("""TimmBackbone initialization is managed on the timm side""" ) def lowerCAmelCase ( self : List[Any] ) -> str: """simple docstring""" pass @unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" ) def lowerCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" pass @unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" ) def lowerCAmelCase ( self : Tuple ) -> Tuple: """simple docstring""" pass @unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" ) def lowerCAmelCase ( self : Dict ) -> Optional[Any]: """simple docstring""" pass @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" ) def lowerCAmelCase ( self : Union[str, Any] ) -> int: """simple docstring""" pass @unittest.skip("""model weights aren't tied in TimmBackbone.""" ) def lowerCAmelCase ( self : Union[str, Any] ) -> str: """simple docstring""" pass @unittest.skip("""model weights aren't tied in TimmBackbone.""" ) def lowerCAmelCase ( self : Dict ) -> int: """simple docstring""" pass @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" ) def lowerCAmelCase ( self : List[str] ) -> List[Any]: """simple docstring""" pass @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" ) def lowerCAmelCase ( self : List[Any] ) -> Tuple: """simple docstring""" pass @unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" ) def lowerCAmelCase ( self : Dict ) -> Any: """simple docstring""" pass @unittest.skip("""TimmBackbone doesn't support output_attentions.""" ) def lowerCAmelCase ( self : str ) -> List[Any]: """simple docstring""" pass @unittest.skip("""Safetensors is not supported by timm.""" ) def lowerCAmelCase ( self : Any ) -> List[Any]: 
"""simple docstring""" pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCAmelCase ( self : List[str] ) -> List[str]: """simple docstring""" pass def lowerCAmelCase ( self : Any ) -> List[str]: """simple docstring""" __lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase : Optional[Any] = model_class(__a ) __lowercase : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase : List[str] = [*signature.parameters.keys()] __lowercase : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __a ) def lowerCAmelCase ( self : Optional[Any] ) -> int: """simple docstring""" __lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common() __lowercase : Optional[Any] = True __lowercase : Union[str, Any] = self.has_attentions # no need to test all models as different heads yield the same functionality __lowercase : Union[str, Any] = self.all_model_classes[0] __lowercase : List[Any] = model_class(__a ) model.to(__a ) __lowercase : Optional[Any] = self._prepare_for_class(__a , __a ) __lowercase : Union[str, Any] = model(**__a ) __lowercase : Optional[int] = outputs[0][-1] # Encoder-/Decoder-only models __lowercase : Any = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: __lowercase : Optional[int] = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=__a ) self.assertIsNotNone(hidden_states.grad ) if self.has_attentions: self.assertIsNotNone(attentions.grad ) def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" __lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase : List[str] = model_class(__a ) model.to(__a ) model.eval() __lowercase : int = model(**__a ) self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) ) self.assertEqual(len(model.channels ) , len(config.out_indices ) ) # Check output of last stage is taken if out_features=None, out_indices=None __lowercase : Any = copy.deepcopy(__a ) __lowercase : Dict = None __lowercase : Tuple = model_class(__a ) model.to(__a ) model.eval() __lowercase : Optional[int] = model(**__a ) self.assertEqual(len(result.feature_maps ) , 1 ) self.assertEqual(len(model.channels ) , 1 ) # Check backbone can be initialized with fresh weights __lowercase : List[str] = copy.deepcopy(__a ) __lowercase : Optional[Any] = False __lowercase : str = model_class(__a ) model.to(__a ) model.eval() __lowercase : List[Any] = model(**__a )
import gc import inspect import unittest import torch from parameterized import parameterized from diffusers import PriorTransformer from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin enable_full_determinism() class lowerCAmelCase ( __a , unittest.TestCase ): '''simple docstring''' _A : List[str] = PriorTransformer _A : Optional[int] = '''hidden_states''' @property def lowerCAmelCase ( self : List[Any] ) -> Any: """simple docstring""" __lowercase : Dict = 4 __lowercase : List[str] = 8 __lowercase : str = 7 __lowercase : Optional[Any] = floats_tensor((batch_size, embedding_dim) ).to(__a ) __lowercase : str = floats_tensor((batch_size, embedding_dim) ).to(__a ) __lowercase : List[Any] = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(__a ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def lowerCAmelCase ( self : List[Any] , __a : Optional[int]=0 ) -> Dict: """simple docstring""" torch.manual_seed(__a ) __lowercase : str = 4 __lowercase : str = 8 __lowercase : Dict = 7 __lowercase : Optional[int] = torch.randn((batch_size, embedding_dim) ).to(__a ) __lowercase : Optional[Any] = torch.randn((batch_size, embedding_dim) ).to(__a ) __lowercase : List[str] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(__a ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } @property def lowerCAmelCase ( self : str ) -> Optional[int]: """simple docstring""" return (4, 8) @property def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" return (4, 8) def lowerCAmelCase ( self : str ) -> Tuple: """simple docstring""" __lowercase : int = { """num_attention_heads""": 2, """attention_head_dim""": 4, """num_layers""": 2, """embedding_dim""": 8, """num_embeddings""": 7, """additional_embeddings""": 4, } __lowercase : List[Any] = self.dummy_input return init_dict, inputs_dict def lowerCAmelCase ( self : List[str] ) -> Any: """simple docstring""" __lowercase , __lowercase : List[str] = PriorTransformer.from_pretrained( """hf-internal-testing/prior-dummy""" , output_loading_info=__a ) self.assertIsNotNone(__a ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(__a ) __lowercase : Optional[Any] = model(**self.dummy_input )[0] assert hidden_states is not None, "Make sure output is not None" def lowerCAmelCase ( self : Any ) -> Dict: """simple docstring""" __lowercase , __lowercase : Dict = self.prepare_init_args_and_inputs_for_common() __lowercase : List[Any] = self.model_class(**__a ) __lowercase : Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase : List[str] = [*signature.parameters.keys()] __lowercase : List[Any] = ["""hidden_states""", """timestep"""] self.assertListEqual(arg_names[:2] , __a ) def lowerCAmelCase ( self : str ) -> Tuple: """simple docstring""" __lowercase : Optional[int] = PriorTransformer.from_pretrained("""hf-internal-testing/prior-dummy""" ) __lowercase : List[str] = model.to(__a ) if hasattr(__a , """set_default_attn_processor""" ): model.set_default_attn_processor() __lowercase : str = self.get_dummy_seed_input() with torch.no_grad(): __lowercase : int = model(**__a )[0] __lowercase : List[str] = output[0, 
:5].flatten().cpu() print(__a ) # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. __lowercase : Union[str, Any] = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239] ) self.assertTrue(torch_all_close(__a , __a , rtol=1E-2 ) ) @slow class lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase ( self : Tuple , __a : Union[str, Any]=1 , __a : str=768 , __a : Optional[int]=77 , __a : Union[str, Any]=0 ) -> Optional[Any]: """simple docstring""" torch.manual_seed(__a ) __lowercase : Union[str, Any] = batch_size __lowercase : Any = embedding_dim __lowercase : int = num_embeddings __lowercase : List[Any] = torch.randn((batch_size, embedding_dim) ).to(__a ) __lowercase : Optional[int] = torch.randn((batch_size, embedding_dim) ).to(__a ) __lowercase : str = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(__a ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def lowerCAmelCase ( self : Optional[Any] ) -> List[str]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() @parameterized.expand( [ # fmt: off [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]], [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]], # fmt: on ] ) def lowerCAmelCase ( self : Union[str, Any] , __a : str , __a : str ) -> int: """simple docstring""" __lowercase : Any = PriorTransformer.from_pretrained("""kandinsky-community/kandinsky-2-1-prior""" , subfolder="""prior""" ) model.to(__a ) __lowercase : Union[str, Any] = self.get_dummy_seed_input(seed=__a ) with torch.no_grad(): __lowercase : int = model(**__a )[0] assert list(sample.shape ) == [1, 768] __lowercase : Any = sample[0, :8].flatten().cpu() print(__a ) __lowercase : Optional[int] = torch.tensor(__a ) assert torch_all_close(__a , __a , atol=1E-3 )
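# Illustrative sketch (not part of the original tests): what @parameterized.expand
# does with the (seed, expected_slice) pairs above -- it generates one independent
# test method per tuple, so every seed/slice pair passes or fails on its own.
# The example case here is made up.
import unittest
from parameterized import parameterized

class ExpandExample(unittest.TestCase):
    @parameterized.expand([(13, 169), (37, 1369)])
    def test_square(self, value, expected):
        self.assertEqual(value * value, expected)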
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowerCamelCase : Optional[int] = logging.get_logger(__name__) lowerCamelCase : str = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''', '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''', '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''', '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''', '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''', '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''', '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''', '''self_attn.rotary_emb''': '''encoder.embed_positions''', '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''', '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''', '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''', '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''', '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''', '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''', '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''', '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''', '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''', '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''', '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''', '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } lowerCamelCase : Optional[Any] = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def snake_case_ ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : int ): for attribute in key.split(""".""" ): __lowercase : List[str] = getattr(lowerCAmelCase_ , lowerCAmelCase_ ) if weight_type is not None: __lowercase : Union[str, Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ ).shape else: __lowercase : Dict = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": __lowercase : Dict = value elif weight_type == "weight_g": __lowercase : Union[str, Any] = value elif weight_type == "weight_v": __lowercase : List[Any] = value elif weight_type == "bias": __lowercase : int = value elif weight_type == "running_mean": __lowercase : List[Any] = value elif weight_type == "running_var": __lowercase : int = value elif weight_type == "num_batches_tracked": __lowercase : int = value elif weight_type == "inv_freq": __lowercase : Optional[Any] = value else: __lowercase : Any = value logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def snake_case_ ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] ): __lowercase : str = [] __lowercase : Any = fairseq_model.state_dict() __lowercase : List[str] = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): __lowercase : Optional[Any] = False if "conv_layers" in name: load_conv_layer( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , hf_model.config.feat_extract_norm == """group""" , ) __lowercase : List[str] = True else: for key, mapped_key in MAPPING.items(): __lowercase : Any = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: __lowercase : Tuple = True if "*" in mapped_key: __lowercase : List[Any] = name.split(lowerCAmelCase_ )[0].split(""".""" )[-2] __lowercase : Any = mapped_key.replace("""*""" , lowerCAmelCase_ ) if "pos_bias_u" in name: __lowercase : Any = None elif "pos_bias_v" in name: __lowercase : Tuple = None elif "weight_g" in name: __lowercase : Union[str, Any] = """weight_g""" elif "weight_v" in name: __lowercase : Dict = """weight_v""" elif "bias" in name: __lowercase : Union[str, Any] = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowercase : str = """weight""" elif "running_mean" in name: __lowercase : str = """running_mean""" elif "inv_freq" in name: __lowercase : List[Any] = """inv_freq""" elif "running_var" in name: __lowercase : Any = """running_var""" elif "num_batches_tracked" in name: __lowercase : Any = """num_batches_tracked""" else: __lowercase : Optional[int] = None set_recursively(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) continue if not is_used: unused_weights.append(lowerCAmelCase_ ) logger.warning(F"Unused weights: {unused_weights}" ) def snake_case_ ( lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any] ): __lowercase : List[Any] = full_name.split("""conv_layers.""" )[-1] __lowercase : int = name.split(""".""" ) __lowercase : Optional[Any] = int(items[0] ) __lowercase : List[str] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) __lowercase : Union[str, Any] = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." 
) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) __lowercase : List[str] = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." ) __lowercase : Union[str, Any] = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." ) __lowercase : Dict = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) else: unused_weights.append(lowerCAmelCase_ ) @torch.no_grad() def snake_case_ ( lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : Dict=True ): if config_path is not None: __lowercase : List[Any] = WavaVecaConformerConfig.from_pretrained(lowerCAmelCase_ , hidden_act="""swish""" ) else: __lowercase : List[Any] = WavaVecaConformerConfig() if "rope" in checkpoint_path: __lowercase : Tuple = """rotary""" if is_finetuned: if dict_path: __lowercase : Any = Dictionary.load(lowerCAmelCase_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __lowercase : List[Any] = target_dict.pad_index __lowercase : Optional[int] = target_dict.bos_index __lowercase : List[Any] = target_dict.eos_index __lowercase : List[str] = len(target_dict.symbols ) __lowercase : Union[str, Any] = os.path.join(lowerCAmelCase_ , """vocab.json""" ) if not os.path.isdir(lowerCAmelCase_ ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowerCAmelCase_ ) ) return os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) __lowercase : Tuple = target_dict.indices # fairseq has the <pad> and <s> switched __lowercase : int = 0 __lowercase : Any = 1 with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(lowerCAmelCase_ , lowerCAmelCase_ ) __lowercase : Dict = WavaVecaCTCTokenizer( lowerCAmelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowerCAmelCase_ , ) __lowercase : List[Any] = True if config.feat_extract_norm == """layer""" else False __lowercase : Optional[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , ) __lowercase : Optional[int] = WavaVecaProcessor(feature_extractor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ ) processor.save_pretrained(lowerCAmelCase_ ) __lowercase : Union[str, Any] = WavaVecaConformerForCTC(lowerCAmelCase_ ) else: __lowercase : Optional[Any] = WavaVecaConformerForPreTraining(lowerCAmelCase_ ) if is_finetuned: __lowercase , __lowercase , 
__lowercase : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: __lowercase : List[Any] = argparse.Namespace(task="""audio_pretraining""" ) __lowercase : Optional[Any] = fairseq.tasks.setup_task(lowerCAmelCase_ ) __lowercase , __lowercase , __lowercase : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCAmelCase_ ) __lowercase : Dict = model[0].eval() recursively_load_weights(lowerCAmelCase_ , lowerCAmelCase_ , not is_finetuned ) hf_wavavec.save_pretrained(lowerCAmelCase_ ) if __name__ == "__main__": lowerCamelCase : int = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) lowerCamelCase : Any = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
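# A minimal usage sketch for the converter above; the checkpoint and output
# paths are hypothetical placeholders, and the keyword names mirror the
# positional order used in the argparse entry point at the bottom of the file.
#
# convert_wavaveca_conformer_checkpoint(
#     "/path/to/wav2vec2_conformer_rope.pt",  # "rope" in the path switches the config to rotary embeddings
#     "/path/to/hf_dump",                     # must be an existing directory when a dict_path is given
#     config_path=None,                       # None -> use the library's default config
#     dict_path=None,
#     is_finetuned=False,                     # equivalent to passing --not_finetuned on the CLI
# )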
from __future__ import annotations


def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1000000) -> int:
    """Sum all numbers below `limit` that are palindromic in base 10 and base 2."""
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
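# Quick sanity check for the double-base palindrome logic above: 585 reads the
# same forwards and backwards in base 10 and in base 2 (1001001001), so
# solution() counts it; 10 is excluded because its binary form 1010 is not a
# palindrome.
assert is_palindrome(585) and is_palindrome(bin(585)[2:])
assert not is_palindrome(bin(10)[2:])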
def hamming_distance(string_a: str, string_b: str) -> int:
    """Count the positions at which two equal-length strings differ."""
    if len(string_a) != len(string_b):
        raise ValueError("String lengths must match!")
    count = 0
    for char_a, char_b in zip(string_a, string_b):
        if char_a != char_b:
            count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
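# Example for hamming_distance above: the classic "karolin"/"kathrin" pair
# differs at exactly three positions (r/t, o/h, l/r).
assert hamming_distance("karolin", "kathrin") == 3
assert hamming_distance("00000", "00000") == 0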
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
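# A sketch (not part of the original module) of how a slow/fast tokenizer pair
# like this is typically registered with the Auto classes; `CustomConfig` is a
# hypothetical config class standing in for whatever the test suite uses.
#
# from transformers import AutoConfig, AutoTokenizer
#
# AutoConfig.register("custom", CustomConfig)
# AutoTokenizer.register(
#     CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
# )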
import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def snake_case_ ( lowerCAmelCase_ : Tuple ): if isinstance(lowerCAmelCase_ , collections.abc.Iterable ): return x return (x, x) @require_flax class lowerCAmelCase : '''simple docstring''' def lowerCAmelCase ( self : Any , __a : Any , __a : List[Any] ) -> Optional[Any]: """simple docstring""" pass def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" pass def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" pass def lowerCAmelCase ( self : Tuple , __a : np.ndarray , __a : np.ndarray , __a : float ) -> List[Any]: """simple docstring""" __lowercase : List[str] = np.abs((a - b) ).max() self.assertLessEqual(__a , __a , F"Difference between torch and flax is {diff} (>= {tol})." ) def lowerCAmelCase ( self : Tuple , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[Any] , __a : Optional[Any]=None , **__a : Tuple ) -> Optional[Any]: """simple docstring""" __lowercase : str = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a ) __lowercase : str = FlaxVisionTextDualEncoderModel(__a ) __lowercase : Optional[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) ) def lowerCAmelCase ( self : Optional[int] , __a : Optional[int] , __a : Dict , __a : Dict , __a : List[str] , __a : Optional[Any]=None , **__a : str ) -> str: """simple docstring""" __lowercase , __lowercase : List[str] = self.get_vision_text_model(__a , __a ) __lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model} __lowercase : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a ) __lowercase : Any = model(input_ids=__a , pixel_values=__a , attention_mask=__a ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] , __a : Union[str, Any] , __a : Union[str, Any] , __a : Dict , __a : int=None , **__a : int ) -> List[Any]: """simple docstring""" __lowercase , __lowercase : Tuple = self.get_vision_text_model(__a , __a ) __lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model} __lowercase : List[str] = 
FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a ) __lowercase : List[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a ) __lowercase : int = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__a ) __lowercase : int = FlaxVisionTextDualEncoderModel.from_pretrained(__a ) __lowercase : Tuple = model(input_ids=__a , pixel_values=__a , attention_mask=__a ) __lowercase : int = after_output[0] __lowercase : int = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__a , 1E-3 ) def lowerCAmelCase ( self : List[Any] , __a : Any , __a : Tuple , __a : Optional[int] , __a : str , __a : Optional[Any]=None , **__a : Optional[Any] ) -> List[Any]: """simple docstring""" __lowercase , __lowercase : str = self.get_vision_text_model(__a , __a ) __lowercase : Optional[Any] = {"""vision_model""": vision_model, """text_model""": text_model} __lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a ) __lowercase : Union[str, Any] = model( input_ids=__a , pixel_values=__a , attention_mask=__a , output_attentions=__a ) __lowercase : Optional[int] = output.vision_model_output.attentions self.assertEqual(len(__a ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) __lowercase : Optional[int] = to_atuple(vision_model.config.image_size ) __lowercase : List[str] = to_atuple(vision_model.config.patch_size ) __lowercase : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __lowercase : int = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __lowercase : Dict = output.text_model_output.attentions self.assertEqual(len(__a ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def lowerCAmelCase ( self : Optional[int] , __a : List[str] , __a : List[Any] , __a : Optional[Any] ) -> Optional[int]: """simple docstring""" pt_model.to(__a ) pt_model.eval() # prepare inputs __lowercase : Union[str, Any] = inputs_dict __lowercase : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): __lowercase : Union[str, Any] = pt_model(**__a ).to_tuple() __lowercase : Tuple = fx_model(**__a ).to_tuple() self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(__a ) __lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained(__a , from_pt=__a ) __lowercase : Dict = fx_model_loaded(**__a ).to_tuple() self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(__a ) __lowercase : str = VisionTextDualEncoderModel.from_pretrained(__a , from_flax=__a ) pt_model_loaded.to(__a ) pt_model_loaded.eval() with torch.no_grad(): __lowercase : List[Any] = pt_model_loaded(**__a ).to_tuple() self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output_loaded in 
zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(__a , pt_output_loaded.numpy() , 4E-2 ) def lowerCAmelCase ( self : Optional[int] , __a : List[Any] , __a : int , __a : Optional[int] ) -> Optional[int]: """simple docstring""" __lowercase : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a ) __lowercase : str = VisionTextDualEncoderModel(__a ) __lowercase : Union[str, Any] = FlaxVisionTextDualEncoderModel(__a ) __lowercase : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __a ) __lowercase : Any = fx_state self.check_pt_flax_equivalence(__a , __a , __a ) def lowerCAmelCase ( self : Any , __a : Any , __a : Dict , __a : Tuple ) -> str: """simple docstring""" __lowercase : int = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a ) __lowercase : Union[str, Any] = VisionTextDualEncoderModel(__a ) __lowercase : Dict = FlaxVisionTextDualEncoderModel(__a ) __lowercase : Tuple = load_flax_weights_in_pytorch_model(__a , fx_model.params ) self.check_pt_flax_equivalence(__a , __a , __a ) def lowerCAmelCase ( self : str ) -> Optional[Any]: """simple docstring""" __lowercase : Optional[Any] = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**__a ) def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" __lowercase : int = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**__a ) def lowerCAmelCase ( self : List[Any] ) -> Dict: """simple docstring""" __lowercase : List[str] = self.prepare_config_and_inputs() self.check_save_load(**__a ) def lowerCAmelCase ( self : Any ) -> Dict: """simple docstring""" __lowercase : str = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**__a ) @is_pt_flax_cross_test def lowerCAmelCase ( self : List[str] ) -> Tuple: """simple docstring""" __lowercase : Optional[Any] = self.prepare_config_and_inputs() __lowercase : Optional[int] = config_inputs_dict.pop("""vision_config""" ) __lowercase : Optional[int] = config_inputs_dict.pop("""text_config""" ) __lowercase : Dict = config_inputs_dict self.check_equivalence_pt_to_flax(__a , __a , __a ) self.check_equivalence_flax_to_pt(__a , __a , __a ) @slow def lowerCAmelCase ( self : Union[str, Any] ) -> str: """simple docstring""" __lowercase , __lowercase : List[Any] = self.get_pretrained_model_and_inputs() __lowercase : Dict = model_a(**__a ) __lowercase : Any = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(__a ) __lowercase : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(__a ) __lowercase : Optional[int] = model_a(**__a ) __lowercase : Tuple = after_outputs[0] __lowercase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__a , 1E-5 ) @require_flax class lowerCAmelCase ( __a , unittest.TestCase ): '''simple docstring''' def lowerCAmelCase ( self : Dict ) -> Dict: """simple docstring""" __lowercase : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=__a , text_from_pt=__a , ) __lowercase : int = 13 __lowercase : Union[str, Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) __lowercase : Dict = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) __lowercase : Tuple = random_attention_mask([batch_size, 4] ) __lowercase : str = 
{"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def lowerCAmelCase ( self : Optional[Any] , __a : Union[str, Any] , __a : int ) -> Dict: """simple docstring""" __lowercase : int = FlaxViTModel(__a ) __lowercase : List[Any] = FlaxBertModel(__a ) return vision_model, text_model def lowerCAmelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" __lowercase : Tuple = FlaxViTModelTester(self ) __lowercase : str = FlaxBertModelTester(self ) __lowercase : List[str] = vit_model_tester.prepare_config_and_inputs() __lowercase : Union[str, Any] = bert_model_tester.prepare_config_and_inputs() __lowercase , __lowercase : Optional[int] = vision_config_and_inputs __lowercase , __lowercase , __lowercase , __lowercase : Any = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class lowerCAmelCase ( __a , unittest.TestCase ): '''simple docstring''' def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __lowercase : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=__a , text_from_pt=__a , ) __lowercase : Tuple = 13 __lowercase : Optional[Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) __lowercase : Tuple = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) __lowercase : List[Any] = random_attention_mask([batch_size, 4] ) __lowercase : int = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def lowerCAmelCase ( self : str , __a : str , __a : Union[str, Any] ) -> Any: """simple docstring""" __lowercase : Dict = FlaxCLIPVisionModel(__a ) __lowercase : Optional[Any] = FlaxBertModel(__a ) return vision_model, text_model def lowerCAmelCase ( self : List[Any] ) -> List[str]: """simple docstring""" __lowercase : List[Any] = FlaxCLIPVisionModelTester(self ) __lowercase : Optional[Any] = FlaxBertModelTester(self ) __lowercase : Any = clip_model_tester.prepare_config_and_inputs() __lowercase : Optional[Any] = bert_model_tester.prepare_config_and_inputs() __lowercase , __lowercase : Dict = vision_config_and_inputs __lowercase , __lowercase , __lowercase , __lowercase : Optional[int] = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" __lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 ) __lowercase : int = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" ) __lowercase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) __lowercase : Tuple = processor( text=["""una foto di un gatto""", """una foto di un cane"""] , images=__a , 
padding=__a , return_tensors="""np""" ) __lowercase : Optional[int] = model(**__a ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) __lowercase : Optional[Any] = np.array([[1.2284727, 0.3104122]] ) self.assertTrue(np.allclose(outputs.logits_per_image , __a , atol=1E-3 ) )
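# Every PT<->Flax equivalence test above reduces to the same max-abs-diff
# comparison; a standalone sketch of that pattern (4e-2 matches the
# deliberately loose cross-framework tolerance the tests use):
import numpy as np


def max_abs_diff(a: np.ndarray, b: np.ndarray) -> float:
    return float(np.abs(a - b).max())


assert max_abs_diff(np.ones(3), np.ones(3) + 1e-3) <= 4e-2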
import warnings

from ...utils import logging
from .image_processing_donut import DonutImageProcessor


logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
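# The file above follows the standard "deprecated alias" pattern: the old name
# stays importable, warns on construction, and defers everything to the
# replacement class. A self-contained sketch of the same idea with
# hypothetical names:
import warnings


class Replacement:
    """Stand-in for the new implementation (hypothetical)."""


class DeprecatedAlias(Replacement):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "DeprecatedAlias is deprecated. Please use Replacement instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)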
from ..utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_scipy_available,
    is_torch_available,
    is_torchsde_available,
)


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_pt_objects import *  # noqa F403
else:
    from .scheduling_consistency_models import CMStochasticIterativeScheduler
    from .scheduling_ddim import DDIMScheduler
    from .scheduling_ddim_inverse import DDIMInverseScheduler
    from .scheduling_ddim_parallel import DDIMParallelScheduler
    from .scheduling_ddpm import DDPMScheduler
    from .scheduling_ddpm_parallel import DDPMParallelScheduler
    from .scheduling_deis_multistep import DEISMultistepScheduler
    from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
    from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
    from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
    from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
    from .scheduling_euler_discrete import EulerDiscreteScheduler
    from .scheduling_heun_discrete import HeunDiscreteScheduler
    from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
    from .scheduling_karras_ve import KarrasVeScheduler
    from .scheduling_pndm import PNDMScheduler
    from .scheduling_repaint import RePaintScheduler
    from .scheduling_sde_ve import ScoreSdeVeScheduler
    from .scheduling_sde_vp import ScoreSdeVpScheduler
    from .scheduling_unclip import UnCLIPScheduler
    from .scheduling_unipc_multistep import UniPCMultistepScheduler
    from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
    from .scheduling_vq_diffusion import VQDiffusionScheduler

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_flax_objects import *  # noqa F403
else:
    from .scheduling_ddim_flax import FlaxDDIMScheduler
    from .scheduling_ddpm_flax import FlaxDDPMScheduler
    from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
    from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
    from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
    from .scheduling_pndm_flax import FlaxPNDMScheduler
    from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
    from .scheduling_utils_flax import (
        FlaxKarrasDiffusionSchedulers,
        FlaxSchedulerMixin,
        FlaxSchedulerOutput,
        broadcast_to_shape_from_left,
    )

try:
    if not (is_torch_available() and is_scipy_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_scipy_objects import *  # noqa F403
else:
    from .scheduling_lms_discrete import LMSDiscreteScheduler

try:
    if not (is_torch_available() and is_torchsde_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_torchsde_objects import *  # noqa F403
else:
    from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
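# A minimal, self-contained sketch of the optional-dependency guard used
# throughout this __init__: probe for the package, and fall back to a dummy
# object that raises a helpful error only when actually used. The class name
# here is a hypothetical stand-in.
import importlib.util


def _is_available(pkg: str) -> bool:
    return importlib.util.find_spec(pkg) is not None


if _is_available("scipy"):
    import scipy  # noqa: F401  # the real module would import its scipy-backed schedulers here
else:
    class ScipyBackedScheduler:  # dummy placeholder, mirroring dummy_torch_and_scipy_objects
        def __init__(self, *args, **kwargs):
            raise ImportError("ScipyBackedScheduler requires `scipy` to be installed.")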
import importlib.util import os import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import ( is_accelerate_available, is_flax_available, is_safetensors_available, is_tf_available, is_torch_available, ) from . import BaseTransformersCLICommand def snake_case_ ( lowerCAmelCase_ : Union[str, Any] ): return EnvironmentCommand() def snake_case_ ( lowerCAmelCase_ : Tuple ): return EnvironmentCommand(args.accelerate_config_file ) class lowerCAmelCase ( __a ): '''simple docstring''' @staticmethod def lowerCAmelCase ( __a : ArgumentParser ) -> str: """simple docstring""" __lowercase : Union[str, Any] = parser.add_parser("""env""" ) download_parser.set_defaults(func=__a ) download_parser.add_argument( """--accelerate-config_file""" , default=__a , help="""The accelerate config file to use for the default values in the launching script.""" , ) download_parser.set_defaults(func=__a ) def __init__( self : List[str] , __a : str , *__a : Union[str, Any] ) -> None: """simple docstring""" __lowercase : Optional[Any] = accelerate_config_file def lowerCAmelCase ( self : List[Any] ) -> List[Any]: """simple docstring""" __lowercase : Dict = """not installed""" if is_safetensors_available(): import safetensors __lowercase : int = safetensors.__version__ elif importlib.util.find_spec("""safetensors""" ) is not None: import safetensors __lowercase : Union[str, Any] = F"{safetensors.__version__} but is ignored because of PyTorch version too old." __lowercase : Union[str, Any] = """not installed""" __lowercase : int = """not found""" if is_accelerate_available(): import accelerate from accelerate.commands.config import default_config_file, load_config_from_file __lowercase : Optional[int] = accelerate.__version__ # Get the default from the config file. 
if self._accelerate_config_file is not None or os.path.isfile(__a ): __lowercase : Optional[Any] = load_config_from_file(self._accelerate_config_file ).to_dict() __lowercase : List[Any] = ( """\n""".join([F"\t- {prop}: {val}" for prop, val in accelerate_config.items()] ) if isinstance(__a , __a ) else F"\t{accelerate_config}" ) __lowercase : int = """not installed""" __lowercase : Union[str, Any] = """NA""" if is_torch_available(): import torch __lowercase : Optional[Any] = torch.__version__ __lowercase : List[str] = torch.cuda.is_available() __lowercase : Union[str, Any] = """not installed""" __lowercase : Tuple = """NA""" if is_tf_available(): import tensorflow as tf __lowercase : Dict = tf.__version__ try: # deprecated in v2.1 __lowercase : Any = tf.test.is_gpu_available() except AttributeError: # returns list of devices, convert to bool __lowercase : int = bool(tf.config.list_physical_devices("""GPU""" ) ) __lowercase : Optional[Any] = """not installed""" __lowercase : List[Any] = """not installed""" __lowercase : str = """not installed""" __lowercase : Tuple = """NA""" if is_flax_available(): import flax import jax import jaxlib __lowercase : int = flax.__version__ __lowercase : int = jax.__version__ __lowercase : List[str] = jaxlib.__version__ __lowercase : str = jax.lib.xla_bridge.get_backend().platform __lowercase : List[Any] = { """`transformers` version""": version, """Platform""": platform.platform(), """Python version""": platform.python_version(), """Huggingface_hub version""": huggingface_hub.__version__, """Safetensors version""": F"{safetensors_version}", """Accelerate version""": F"{accelerate_version}", """Accelerate config""": F"{accelerate_config_str}", """PyTorch version (GPU?)""": F"{pt_version} ({pt_cuda_available})", """Tensorflow version (GPU?)""": F"{tf_version} ({tf_cuda_available})", """Flax version (CPU?/GPU?/TPU?)""": F"{flax_version} ({jax_backend})", """Jax version""": F"{jax_version}", """JaxLib version""": F"{jaxlib_version}", """Using GPU in script?""": """<fill in>""", """Using distributed or parallel set-up in script?""": """<fill in>""", } print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" ) print(self.format_dict(__a ) ) return info @staticmethod def lowerCAmelCase ( __a : List[Any] ) -> List[str]: """simple docstring""" return "\n".join([F"- {prop}: {val}" for prop, val in d.items()] ) + "\n"
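# The command above is essentially a set of version probes: try each
# framework, record "not installed" on failure. The same pattern, compacted to
# the standard library (importlib.metadata, Python 3.8+):
from importlib import metadata


def probe_version(package: str) -> str:
    try:
        return metadata.version(package)
    except metadata.PackageNotFoundError:
        return "not installed"


versions = {name: probe_version(name) for name in ("torch", "tensorflow", "flax")}
print("\n".join(f"- {prop}: {val}" for prop, val in versions.items()))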
import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm lowerCamelCase : str = re.compile('''[^A-Za-z_0-9]''') # parameters used in DuplicationIndex lowerCamelCase : Union[str, Any] = 10 lowerCamelCase : List[str] = 2_56 def snake_case_ ( lowerCAmelCase_ : List[str] ): if len(lowerCAmelCase_ ) < MIN_NUM_TOKENS: return None __lowercase : Dict = MinHash(num_perm=lowerCAmelCase_ ) for token in set(lowerCAmelCase_ ): min_hash.update(token.encode() ) return min_hash def snake_case_ ( lowerCAmelCase_ : str ): return {t for t in NON_ALPHA.split(lowerCAmelCase_ ) if len(t.strip() ) > 0} class lowerCAmelCase : '''simple docstring''' def __init__( self : List[str] , *, __a : float = 0.85 , ) -> Union[str, Any]: """simple docstring""" __lowercase : Optional[Any] = duplication_jaccard_threshold __lowercase : Optional[Any] = NUM_PERM __lowercase : List[Any] = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm ) __lowercase : List[str] = defaultdict(__a ) def lowerCAmelCase ( self : str , __a : Tuple , __a : MinHash ) -> None: """simple docstring""" __lowercase : List[Any] = self._index.query(__a ) if code_key in self._index.keys: print(F"Duplicate key {code_key}" ) return self._index.insert(__a , __a ) if len(__a ) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(__a ) break else: self._duplicate_clusters[close_duplicates[0]].add(__a ) def lowerCAmelCase ( self : Union[str, Any] ) -> List[List[Dict]]: """simple docstring""" __lowercase : Dict = [] for base, duplicates in self._duplicate_clusters.items(): __lowercase : List[str] = [base] + list(__a ) # reformat the cluster to be a list of dict __lowercase : Optional[Any] = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster] duplicate_clusters.append(__a ) return duplicate_clusters def lowerCAmelCase ( self : Any , __a : int ) -> None: """simple docstring""" __lowercase : Tuple = self.get_duplicate_clusters() with open(__a , """w""" ) as f: json.dump(__a , __a ) def snake_case_ ( lowerCAmelCase_ : str ): __lowercase , __lowercase : Union[str, Any] = element __lowercase : Optional[Any] = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] ) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def snake_case_ ( lowerCAmelCase_ : Type[Dataset] ): with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash , ThreadedIterator(lowerCAmelCase_ , max_queue_size=10000 ) , chunksize=100 , ): if data is not None: yield data def snake_case_ ( lowerCAmelCase_ : Type[Dataset] , lowerCAmelCase_ : float ): __lowercase : Dict = DuplicationIndex(duplication_jaccard_threshold=lowerCAmelCase_ ) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(lowerCAmelCase_ ) ) , max_queue_size=100 ) ): di.add(lowerCAmelCase_ , lowerCAmelCase_ ) # Returns a List[Cluster] where Cluster is List[str] with the filenames. 
return di.get_duplicate_clusters() def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : str ): __lowercase : List[str] = get_tokens(lowerCAmelCase_ ) __lowercase : Dict = get_tokens(lowerCAmelCase_ ) return len(tokensa & tokensa ) / len(tokensa | tokensa ) lowerCamelCase : List[str] = None def snake_case_ ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[Any] ): __lowercase : Union[str, Any] = [] for elementa in cluster: __lowercase : Tuple = _shared_dataset[elementa["""base_index"""]]["""content"""] for elementa in extremes: __lowercase : Dict = _shared_dataset[elementa["""base_index"""]]["""content"""] if jaccard_similarity(lowerCAmelCase_ , lowerCAmelCase_ ) >= jaccard_threshold: elementa["copies"] += 1 break else: __lowercase : Dict = 1 extremes.append(lowerCAmelCase_ ) return extremes def snake_case_ ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple ): global _shared_dataset __lowercase : Tuple = dataset __lowercase : Optional[int] = [] __lowercase : str = partial(_find_cluster_extremes_shared , jaccard_threshold=lowerCAmelCase_ ) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( lowerCAmelCase_ , lowerCAmelCase_ , ) , total=len(lowerCAmelCase_ ) , ): extremes_list.append(lowerCAmelCase_ ) return extremes_list def snake_case_ ( lowerCAmelCase_ : Type[Dataset] , lowerCAmelCase_ : float = 0.85 ): __lowercase : Optional[int] = make_duplicate_clusters(lowerCAmelCase_ , lowerCAmelCase_ ) __lowercase : Tuple = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster} __lowercase : int = {} __lowercase : Dict = find_extremes(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for extremes in extremes_clusters: for element in extremes: __lowercase : Optional[Any] = element __lowercase : int = duplicate_indices - set(extreme_dict.keys() ) __lowercase : int = dataset.filter(lambda lowerCAmelCase_ , lowerCAmelCase_ : idx not in remove_indices , with_indices=lowerCAmelCase_ ) # update duplicate_clusters for cluster in duplicate_clusters: for element in cluster: __lowercase : List[str] = element["""base_index"""] in extreme_dict if element["is_extreme"]: __lowercase : str = extreme_dict[element["""base_index"""]]["""copies"""] print(F"Original dataset size: {len(lowerCAmelCase_ )}" ) print(F"Number of duplicate clusters: {len(lowerCAmelCase_ )}" ) print(F"Files in duplicate cluster: {len(lowerCAmelCase_ )}" ) print(F"Unique files in duplicate cluster: {len(lowerCAmelCase_ )}" ) print(F"Filtered dataset size: {len(lowerCAmelCase_ )}" ) return ds_filter, duplicate_clusters
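# A small end-to-end sketch of the MinHash machinery the pipeline above is
# built on (num_perm=256 matches NUM_PERM): two similar token sets should
# report a Jaccard estimate close to the true overlap, here 3/5 = 0.6.
from datasketch import MinHash


def minhash_of(tokens, num_perm=256):
    m = MinHash(num_perm=num_perm)
    for token in tokens:
        m.update(token.encode())
    return m


a = minhash_of({"def", "foo", "return", "x"})
b = minhash_of({"def", "foo", "return", "y"})
print(a.jaccard(b))  # ~0.6, up to MinHash estimation noise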
def merge_sort(collection):
    """Repeatedly pull the current min and max off the list, building the
    sorted result from both ends."""
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(*merge_sort(unsorted), sep=''',''')
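# Despite the file's name, this is not the classic divide-and-conquer merge
# sort: each pass extracts the current min and max, making it a double-ended
# selection sort, O(n^2) overall. Quick checks:
assert merge_sort([10, 3, 7, 1, 7]) == [1, 3, 7, 7, 10]
assert merge_sort([]) == []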
from ...processing_utils import ProcessorMixin


class TvltProcessor(ProcessorMixin):
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
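# A usage sketch for the processor above; the inputs are illustrative, and
# 44100 is assumed to be the feature extractor's expected sampling rate:
#
# processor = TvltProcessor(image_processor=TvltImageProcessor(), feature_extractor=TvltFeatureExtractor())
# batch = processor(images=video_frames, audio=waveform, sampling_rate=44100)
# # `batch` merges the image-processor and feature-extractor outputs into one dict.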
def combination_util(arr, n, r, index, data, i):
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
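# The recursion above enumerates the same r-subsets itertools produces; a
# quick cross-check of the count for the driver input (C(5, 3) = 10):
from itertools import combinations

assert len(list(combinations([10, 20, 30, 40, 50], 3))) == 10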
import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class lowerCAmelCase : '''simple docstring''' def __init__( self : Any , __a : Tuple , __a : Optional[int]=13 , __a : int=7 , __a : List[str]=False , __a : Optional[int]=True , __a : Optional[int]=False , __a : Dict=True , __a : Optional[int]=33 , __a : Dict=32 , __a : Optional[int]=5 , __a : Union[str, Any]=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : List[str]=0.1 , __a : Dict=0.1 , __a : List[Any]=512 , __a : Any=16 , __a : Optional[Any]=2 , __a : List[Any]=0.02 , __a : int=3 , __a : Union[str, Any]=4 , __a : Optional[int]=None , ) -> Optional[int]: """simple docstring""" __lowercase : Tuple = parent __lowercase : int = batch_size __lowercase : Any = seq_length __lowercase : str = is_training __lowercase : str = use_input_mask __lowercase : Optional[int] = use_token_type_ids __lowercase : List[Any] = use_labels __lowercase : Optional[Any] = vocab_size __lowercase : int = hidden_size __lowercase : List[Any] = num_hidden_layers __lowercase : Dict = num_attention_heads __lowercase : Any = intermediate_size __lowercase : Dict = hidden_act __lowercase : Union[str, Any] = hidden_dropout_prob __lowercase : List[Any] = attention_probs_dropout_prob __lowercase : List[str] = max_position_embeddings __lowercase : Union[str, Any] = type_vocab_size __lowercase : Dict = type_sequence_label_size __lowercase : Union[str, Any] = initializer_range __lowercase : List[Any] = num_labels __lowercase : str = num_choices __lowercase : Tuple = scope def lowerCAmelCase ( self : Tuple ) -> List[Any]: """simple docstring""" __lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase : int = None if self.use_input_mask: __lowercase : Dict = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase : str = None __lowercase : Optional[Any] = None __lowercase : Tuple = None if self.use_labels: __lowercase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __lowercase : int = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self : Dict ) -> Optional[int]: """simple docstring""" return EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def lowerCAmelCase ( self : List[Any] , __a : int , __a : int , __a : Dict , __a : 
Union[str, Any] , __a : List[str] , __a : str ) -> Union[str, Any]: """simple docstring""" __lowercase : Optional[int] = EsmModel(config=__a ) model.to(__a ) model.eval() __lowercase : str = model(__a , attention_mask=__a ) __lowercase : List[Any] = model(__a ) __lowercase : Optional[int] = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase ( self : Union[str, Any] , __a : Dict , __a : List[Any] , __a : Tuple , __a : Union[str, Any] , __a : str , __a : Union[str, Any] ) -> List[str]: """simple docstring""" __lowercase : List[str] = EsmForMaskedLM(config=__a ) model.to(__a ) model.eval() __lowercase : int = model(__a , attention_mask=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Optional[int] , __a : Union[str, Any] , __a : List[Any] , __a : Tuple , __a : Tuple , __a : Optional[int] , __a : Tuple ) -> Union[str, Any]: """simple docstring""" __lowercase : Tuple = self.num_labels __lowercase : Any = EsmForTokenClassification(config=__a ) model.to(__a ) model.eval() __lowercase : Optional[Any] = model(__a , attention_mask=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" __lowercase : Any = self.prepare_config_and_inputs() ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) : List[str] = config_and_inputs __lowercase : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase ( __a , __a , unittest.TestCase ): '''simple docstring''' _A : Optional[Any] = False _A : Any = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) _A : Optional[Any] = () _A : List[Any] = ( { '''feature-extraction''': EsmModel, '''fill-mask''': EsmForMaskedLM, '''text-classification''': EsmForSequenceClassification, '''token-classification''': EsmForTokenClassification, '''zero-shot''': EsmForSequenceClassification, } if is_torch_available() else {} ) _A : Optional[Any] = True def lowerCAmelCase ( self : Tuple ) -> str: """simple docstring""" __lowercase : Optional[int] = EsmModelTester(self ) __lowercase : Tuple = ConfigTester(self , config_class=__a , hidden_size=37 ) def lowerCAmelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def lowerCAmelCase ( self : int ) -> Optional[Any]: """simple docstring""" __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def lowerCAmelCase ( self : Dict ) -> Optional[Any]: """simple docstring""" __lowercase : Any = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __lowercase : Union[str, Any] = type self.model_tester.create_and_check_model(*__a ) def lowerCAmelCase ( self : int ) -> Any: """simple docstring""" __lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__a ) def lowerCAmelCase ( self : Union[str, Any] ) -> Dict: """simple docstring""" __lowercase : int = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__a ) @slow def lowerCAmelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase : List[str] = EsmModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" __lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0] __lowercase : List[str] = EsmEmbeddings(config=__a ) __lowercase : Union[str, Any] = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) __lowercase : int = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) __lowercase : str = create_position_ids_from_input_ids(__a , model.padding_idx ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__a , __a ) ) ) def lowerCAmelCase ( self : Tuple ) -> Any: """simple docstring""" __lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0] __lowercase : Optional[Any] = EsmEmbeddings(config=__a ) __lowercase : Optional[int] = torch.empty(2 , 4 , 30 ) __lowercase : Tuple = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] __lowercase : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] ) __lowercase : Any = embeddings.create_position_ids_from_inputs_embeds(__a ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__a , __a ) ) ) @unittest.skip("""Esm does not support embedding resizing""" ) def lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" pass @unittest.skip("""Esm does not support embedding resizing""" ) def lowerCAmelCase ( self : List[str] ) -> Optional[int]: """simple docstring""" pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCAmelCase ( self : Optional[int] ) -> Tuple: """simple docstring""" pass @require_torch class lowerCAmelCase ( __a ): '''simple docstring''' @slow def lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" with torch.no_grad(): __lowercase : Tuple = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" ) model.eval() __lowercase : Tuple = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __lowercase : List[str] = model(__a )[0] __lowercase : Union[str, Any] = 33 __lowercase : Union[str, Any] = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape , __a ) __lowercase : List[Any] = torch.tensor( [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) ) @slow def lowerCAmelCase ( self : str ) -> Union[str, Any]: """simple docstring""" with torch.no_grad(): __lowercase : int = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" ) model.eval() __lowercase : int = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) __lowercase : Any = model(__a )[0] # compare the actual values for a slice. __lowercase : int = torch.tensor( [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )
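# The position-id tests above pin down ESM's padding-aware scheme: positions
# count only non-padding tokens and are offset past padding_idx. A standalone
# sketch of that rule, reproducing the expected tensor from the test:
import torch


def position_ids_from_input_ids(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    mask = input_ids.ne(padding_idx).int()
    return torch.cumsum(mask, dim=1) * mask + padding_idx


print(position_ids_from_input_ids(torch.tensor([[12, 31, 13, 1]]), padding_idx=1))
# -> [[2, 3, 4, 1]]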
from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig lowerCamelCase : List[str] = logging.get_logger(__name__) # General docstring lowerCamelCase : List[Any] = '''MobileNetV1Config''' # Base docstring lowerCamelCase : Optional[int] = '''google/mobilenet_v1_1.0_224''' lowerCamelCase : Optional[Any] = [1, 10_24, 7, 7] # Image classification docstring lowerCamelCase : Any = '''google/mobilenet_v1_1.0_224''' lowerCamelCase : Optional[Any] = '''tabby, tabby cat''' lowerCamelCase : Tuple = [ '''google/mobilenet_v1_1.0_224''', '''google/mobilenet_v1_0.75_192''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def snake_case_ ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int]=None ): __lowercase : List[Any] = {} if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): __lowercase : Any = model.mobilenet_va else: __lowercase : Tuple = model __lowercase : str = """MobilenetV1/Conv2d_0/""" __lowercase : List[Any] = backbone.conv_stem.convolution.weight __lowercase : List[Any] = backbone.conv_stem.normalization.bias __lowercase : Optional[Any] = backbone.conv_stem.normalization.weight __lowercase : List[Any] = backbone.conv_stem.normalization.running_mean __lowercase : Any = backbone.conv_stem.normalization.running_var for i in range(13 ): __lowercase : int = i + 1 __lowercase : Optional[Any] = i * 2 __lowercase : Optional[Any] = backbone.layer[pt_index] __lowercase : Dict = F"MobilenetV1/Conv2d_{tf_index}_depthwise/" __lowercase : Union[str, Any] = pointer.convolution.weight __lowercase : Optional[Any] = pointer.normalization.bias __lowercase : List[str] = pointer.normalization.weight __lowercase : List[str] = pointer.normalization.running_mean __lowercase : List[Any] = pointer.normalization.running_var __lowercase : List[Any] = backbone.layer[pt_index + 1] __lowercase : Any = F"MobilenetV1/Conv2d_{tf_index}_pointwise/" __lowercase : List[str] = pointer.convolution.weight __lowercase : Dict = pointer.normalization.bias __lowercase : str = pointer.normalization.weight __lowercase : int = pointer.normalization.running_mean __lowercase : List[str] = pointer.normalization.running_var if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): __lowercase : List[str] = """MobilenetV1/Logits/Conv2d_1c_1x1/""" __lowercase : Dict = model.classifier.weight __lowercase : Tuple = model.classifier.bias return tf_to_pt_map def snake_case_ ( lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] ): try: import numpy as np import tensorflow as tf except ImportError: logger.error( """Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. 
Please see """ """https://www.tensorflow.org/install/ for installation instructions.""" ) raise # Load weights from TF model __lowercase : List[Any] = tf.train.list_variables(lowerCAmelCase_ ) __lowercase : Union[str, Any] = {} for name, shape in init_vars: logger.info(F"Loading TF weight {name} with shape {shape}" ) __lowercase : Dict = tf.train.load_variable(lowerCAmelCase_ , lowerCAmelCase_ ) __lowercase : Dict = array # Build TF to PyTorch weights loading map __lowercase : Union[str, Any] = _build_tf_to_pytorch_map(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for name, pointer in tf_to_pt_map.items(): logger.info(F"Importing {name}" ) if name not in tf_weights: logger.info(F"{name} not in tf pre-trained weights, skipping" ) continue __lowercase : Tuple = tf_weights[name] if "depthwise_weights" in name: logger.info("""Transposing depthwise""" ) __lowercase : Optional[int] = np.transpose(lowerCAmelCase_ , (2, 3, 0, 1) ) elif "weights" in name: logger.info("""Transposing""" ) if len(pointer.shape ) == 2: # copying into linear layer __lowercase : Any = array.squeeze().transpose() else: __lowercase : int = np.transpose(lowerCAmelCase_ , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(F"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" ) logger.info(F"Initialize PyTorch weight {name} {array.shape}" ) __lowercase : Tuple = torch.from_numpy(lowerCAmelCase_ ) tf_weights.pop(lowerCAmelCase_ , lowerCAmelCase_ ) tf_weights.pop(name + """/RMSProp""" , lowerCAmelCase_ ) tf_weights.pop(name + """/RMSProp_1""" , lowerCAmelCase_ ) tf_weights.pop(name + """/ExponentialMovingAverage""" , lowerCAmelCase_ ) logger.info(F"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" ) return model def snake_case_ ( lowerCAmelCase_ : torch.Tensor , lowerCAmelCase_ : nn.Convad ): __lowercase , __lowercase : List[str] = features.shape[-2:] __lowercase , __lowercase : Optional[int] = conv_layer.stride __lowercase , __lowercase : Tuple = conv_layer.kernel_size if in_height % stride_height == 0: __lowercase : Dict = max(kernel_height - stride_height , 0 ) else: __lowercase : List[str] = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: __lowercase : Any = max(kernel_width - stride_width , 0 ) else: __lowercase : Any = max(kernel_width - (in_width % stride_width) , 0 ) __lowercase : int = pad_along_width // 2 __lowercase : List[Any] = pad_along_width - pad_left __lowercase : Dict = pad_along_height // 2 __lowercase : Union[str, Any] = pad_along_height - pad_top __lowercase : Any = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(lowerCAmelCase_ , lowerCAmelCase_ , """constant""" , 0.0 ) class lowerCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self : int , __a : MobileNetVaConfig , __a : int , __a : int , __a : int , __a : Optional[int] = 1 , __a : Optional[int] = 1 , __a : bool = False , __a : Optional[bool] = True , __a : Optional[bool or str] = True , ) -> None: """simple docstring""" super().__init__() __lowercase : str = config if in_channels % groups != 0: raise ValueError(F"Input channels ({in_channels}) are not divisible by {groups} groups." ) if out_channels % groups != 0: raise ValueError(F"Output channels ({out_channels}) are not divisible by {groups} groups." 
)

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features


class MobileNetV1PreTrainedModel(PreTrainedModel):
    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # Depthwise 3x3 convolution followed by a pointwise 1x1 convolution
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )


@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g.
    for ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
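# Illustrative usage of the classification model above (not part of the
# original file). The checkpoint name "google/mobilenet_v1_1.0_224" is an
# assumption; any MobileNetV1 checkpoint with a classification head should
# behave the same way.
from PIL import Image
import requests
import torch
from transformers import AutoImageProcessor, MobileNetV1ForImageClassification

image = Image.open(
    requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
)
processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, num_labels)
print(model.config.id2label[logits.argmax(-1).item()])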
def is_pentagonal(n: int) -> bool:
    """A number is pentagonal if (1 + sqrt(1 + 24 * n)) / 6 is an integer."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Returns the smallest difference b = P_j - P_i such that both the sum
    P_i + P_j and the difference of the two pentagonal numbers are pentagonal."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
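# Background note (not in the original solution): is_pentagonal inverts the
# pentagonal formula P(k) = k * (3 * k - 1) / 2. Solving 3k^2 - k - 2n = 0 for
# k gives k = (1 + sqrt(1 + 24 * n)) / 6, so n is pentagonal exactly when that
# k comes out as a positive integer. For example:
#
#     >>> [k * (3 * k - 1) // 2 for k in range(1, 6)]
#     [1, 5, 12, 22, 35]
#     >>> all(is_pentagonal(p) for p in (1, 5, 12, 22, 35))
#     True
#     >>> is_pentagonal(6)
#     False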
from math import ceil


def assert_device_map(device_map: dict, num_blocks: int) -> None:
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)

    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers: int, devices: list) -> dict:
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
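# Quick illustration of the helpers above on a hypothetical 12-layer model
# split over two devices: each device receives ceil(12 / 2) = 6 consecutive
# layers, and assert_device_map passes because every layer index appears
# exactly once.
device_map = get_device_map(n_layers=12, devices=[0, 1])
print(device_map)  # {0: [0, 1, 2, 3, 4, 5], 1: [6, 7, 8, 9, 10, 11]}
assert_device_map(device_map, num_blocks=12)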
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
from typing import List, Union

import numpy as np

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
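# Illustrative use of the pipeline above (not part of the original file). The
# "Intel/dpt-large" checkpoint name is an assumption; any model registered in
# MODEL_FOR_DEPTH_ESTIMATION_MAPPING works the same way.
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
result["depth"].save("depth.png")       # normalized PIL image, values 0-255
print(result["predicted_depth"].shape)  # raw depth tensor from the model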
import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate lowerCamelCase : str = trt.Logger(trt.Logger.WARNING) lowerCamelCase : Any = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) lowerCamelCase : Optional[Any] = logging.getLogger(__name__) lowerCamelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--onnx_model_path''', default=None, type=str, required=True, help='''Path to ONNX model: ''', ) parser.add_argument( '''--output_dir''', default=None, type=str, required=True, help='''The output directory where the model checkpoints and predictions will be written.''', ) # Other parameters parser.add_argument( '''--tokenizer_name''', default='''''', type=str, required=True, help='''Pretrained tokenizer name or path if not the same as model_name''', ) parser.add_argument( '''--version_2_with_negative''', action='''store_true''', help='''If true, the SQuAD examples contain some that do not have an answer.''', ) parser.add_argument( '''--null_score_diff_threshold''', type=float, default=0.0, help='''If null_score - best_non_null is greater than the threshold predict null.''', ) parser.add_argument( '''--max_seq_length''', default=3_84, type=int, help=( '''The maximum total input sequence length after WordPiece tokenization. Sequences ''' '''longer than this will be truncated, and sequences shorter than this will be padded.''' ), ) parser.add_argument( '''--doc_stride''', default=1_28, type=int, help='''When splitting up a long document into chunks, how much stride to take between chunks.''', ) parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''') parser.add_argument( '''--n_best_size''', default=20, type=int, help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''', ) parser.add_argument( '''--max_answer_length''', default=30, type=int, help=( '''The maximum length of an answer that can be generated. 
This is needed because the start ''' '''and end predictions are not conditioned on one another.''' ), ) parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''') parser.add_argument( '''--dataset_name''', type=str, default=None, required=True, help='''The name of the dataset to use (via the datasets library).''', ) parser.add_argument( '''--dataset_config_name''', type=str, default=None, help='''The configuration name of the dataset to use (via the datasets library).''', ) parser.add_argument( '''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.''' ) parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''') parser.add_argument( '''--fp16''', action='''store_true''', help='''Whether to use 16-bit (mixed) precision instead of 32-bit''', ) parser.add_argument( '''--int8''', action='''store_true''', help='''Whether to use INT8''', ) lowerCamelCase : Dict = parser.parse_args() if args.tokenizer_name: lowerCamelCase : str = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( '''You are instantiating a new tokenizer from scratch. This is not supported by this script.''' '''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' ) logger.info('''Training/evaluation parameters %s''', args) lowerCamelCase : List[str] = args.per_device_eval_batch_size lowerCamelCase : Any = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties lowerCamelCase : List[str] = True lowerCamelCase : List[Any] = '''temp_engine/bert-fp32.engine''' if args.fpaa: lowerCamelCase : Optional[Any] = '''temp_engine/bert-fp16.engine''' if args.inta: lowerCamelCase : int = '''temp_engine/bert-int8.engine''' # import ONNX file if not os.path.exists('''temp_engine'''): os.makedirs('''temp_engine''') lowerCamelCase : int = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, '''rb''') as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network lowerCamelCase : Union[str, Any] = [network.get_input(i) for i in range(network.num_inputs)] lowerCamelCase : Dict = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: lowerCamelCase : List[str] = 1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) lowerCamelCase : Optional[int] = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) lowerCamelCase : Optional[Any] = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, '''wb''') as f: f.write(engine.serialize()) def snake_case_ ( lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple ): __lowercase : List[str] = 
np.asarray(inputs["""input_ids"""] , dtype=np.intaa ) __lowercase : Union[str, Any] = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa ) __lowercase : int = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa ) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowerCAmelCase_ ) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowerCAmelCase_ ) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowerCAmelCase_ ) # start time __lowercase : Optional[Any] = time.time() # Run inference context.execute_async( bindings=[int(lowerCAmelCase_ ) for d_inp in d_inputs] + [int(lowerCAmelCase_ ), int(lowerCAmelCase_ )] , stream_handle=stream.handle ) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) cuda.memcpy_dtoh_async(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # Synchronize the stream and take time stream.synchronize() # end time __lowercase : int = time.time() __lowercase : Union[str, Any] = end_time - start_time __lowercase : Any = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. lowerCamelCase : Tuple = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. lowerCamelCase : List[Any] = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError('''Evaluation requires a dataset name''') # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. lowerCamelCase : Optional[Any] = raw_datasets['''validation'''].column_names lowerCamelCase : Union[str, Any] = '''question''' if '''question''' in column_names else column_names[0] lowerCamelCase : str = '''context''' if '''context''' in column_names else column_names[1] lowerCamelCase : Dict = '''answers''' if '''answers''' in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). 
lowerCamelCase : Dict = tokenizer.padding_side == '''right''' if args.max_seq_length > tokenizer.model_max_length: logger.warning( f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the''' f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' ) lowerCamelCase : Tuple = min(args.max_seq_length, tokenizer.model_max_length) def snake_case_ ( lowerCAmelCase_ : int ): # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace __lowercase : str = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. __lowercase : List[str] = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowerCAmelCase_ , stride=args.doc_stride , return_overflowing_tokens=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , padding="""max_length""" , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. __lowercase : List[str] = tokenized_examples.pop("""overflow_to_sample_mapping""" ) # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. __lowercase : Any = [] for i in range(len(tokenized_examples["""input_ids"""] ) ): # Grab the sequence corresponding to that example (to know what is the context and what is the question). __lowercase : Dict = tokenized_examples.sequence_ids(lowerCAmelCase_ ) __lowercase : List[Any] = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. __lowercase : List[str] = sample_mapping[i] tokenized_examples["example_id"].append(examples["""id"""][sample_index] ) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. 
__lowercase : Dict = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] ) ] return tokenized_examples lowerCamelCase : Tuple = raw_datasets['''validation'''] # Validation Feature Creation lowerCamelCase : Optional[int] = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc='''Running tokenizer on validation dataset''', ) lowerCamelCase : Union[str, Any] = default_data_collator lowerCamelCase : Optional[Any] = eval_dataset.remove_columns(['''example_id''', '''offset_mapping''']) lowerCamelCase : List[str] = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def snake_case_ ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict="eval" ): # Post-processing: we match the start logits and end logits to answers in the original context. __lowercase : int = postprocess_qa_predictions( examples=lowerCAmelCase_ , features=lowerCAmelCase_ , predictions=lowerCAmelCase_ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowerCAmelCase_ , ) # Format the result to the format the metric expects. if args.version_2_with_negative: __lowercase : Optional[int] = [ {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items() ] else: __lowercase : List[Any] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()] __lowercase : Optional[int] = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=lowerCAmelCase_ , label_ids=lowerCAmelCase_ ) lowerCamelCase : Dict = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''') # Evaluation! logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path) with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def snake_case_ ( lowerCAmelCase_ : str ): return trt.volume(engine.get_binding_shape(lowerCAmelCase_ ) ) * engine.get_binding_dtype(lowerCAmelCase_ ).itemsize # Allocate device memory for inputs and outputs. lowerCamelCase : int = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer lowerCamelCase : Dict = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) lowerCamelCase : str = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) lowerCamelCase : Dict = cuda.mem_alloc(h_outputa.nbytes) lowerCamelCase : Optional[Any] = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. 
lowerCamelCase : Optional[int] = cuda.Stream() # Evaluation logger.info('''***** Running Evaluation *****''') logger.info(f''' Num examples = {len(eval_dataset)}''') logger.info(f''' Batch size = {args.per_device_eval_batch_size}''') lowerCamelCase : int = 0.0 lowerCamelCase : List[str] = 0 lowerCamelCase : List[str] = timeit.default_timer() lowerCamelCase : List[Any] = None for step, batch in enumerate(eval_dataloader): lowerCamelCase ,lowerCamelCase : str = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 lowerCamelCase ,lowerCamelCase : Union[str, Any] = outputs lowerCamelCase : Optional[Any] = torch.tensor(start_logits) lowerCamelCase : List[str] = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered lowerCamelCase : Optional[int] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_00) lowerCamelCase : Dict = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_00) lowerCamelCase : List[Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) lowerCamelCase : Dict = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_00) if all_preds is not None: lowerCamelCase : Tuple = nested_truncate(all_preds, len(eval_dataset)) lowerCamelCase : Dict = timeit.default_timer() - start_time logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 10_00 / niter)) logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 10_00)) logger.info('''Total Number of Inference = %d''', niter) lowerCamelCase : str = post_processing_function(eval_examples, eval_dataset, all_preds) lowerCamelCase : Optional[Any] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(f'''Evaluation metrics: {eval_metric}''')
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}


class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
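# Quick illustration of the config class above (overridden values chosen for
# the example, not taken from the original file). The attribute_map means
# `hidden_size` transparently resolves to `d_model`, so generic code can read
# either name.
config = NllbMoeConfig(num_experts=8, expert_capacity=16)
assert config.hidden_size == config.d_model == 1024
assert config.num_experts == 8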
import inspect import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import CvtForImageClassification, CvtModel from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCAmelCase ( __a ): '''simple docstring''' def lowerCAmelCase ( self : List[Any] ) -> Any: """simple docstring""" __lowercase : Optional[int] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__a , """embed_dim""" ) ) self.parent.assertTrue(hasattr(__a , """num_heads""" ) ) class lowerCAmelCase : '''simple docstring''' def __init__( self : List[str] , __a : Any , __a : Union[str, Any]=13 , __a : Optional[int]=64 , __a : List[str]=3 , __a : Any=[16, 48, 96] , __a : List[Any]=[1, 3, 6] , __a : Dict=[1, 2, 10] , __a : List[str]=[7, 3, 3] , __a : str=[4, 2, 2] , __a : List[str]=[2, 1, 1] , __a : Tuple=[2, 2, 2] , __a : Optional[Any]=[False, False, True] , __a : int=[0.0, 0.0, 0.0] , __a : Optional[Any]=0.02 , __a : Union[str, Any]=1E-12 , __a : str=True , __a : List[Any]=True , __a : Dict=2 , ) -> List[str]: """simple docstring""" __lowercase : Tuple = parent __lowercase : int = batch_size __lowercase : Union[str, Any] = image_size __lowercase : Optional[Any] = patch_sizes __lowercase : int = patch_stride __lowercase : List[str] = patch_padding __lowercase : int = is_training __lowercase : Optional[int] = use_labels __lowercase : Any = num_labels __lowercase : Optional[int] = num_channels __lowercase : List[str] = embed_dim __lowercase : str = num_heads __lowercase : List[Any] = stride_kv __lowercase : Optional[Any] = depth __lowercase : Tuple = cls_token __lowercase : Union[str, Any] = attention_drop_rate __lowercase : Union[str, Any] = initializer_range __lowercase : List[str] = layer_norm_eps def lowerCAmelCase ( self : Optional[int] ) -> int: """simple docstring""" __lowercase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase : Any = None if self.use_labels: __lowercase : Dict = ids_tensor([self.batch_size] , self.num_labels ) __lowercase : Union[str, Any] = self.get_config() return config, pixel_values, labels def lowerCAmelCase ( self : str ) -> Dict: """simple docstring""" return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def lowerCAmelCase ( self : str , __a : Optional[Any] , __a : List[Any] , __a : List[Any] ) -> int: """simple docstring""" __lowercase : Union[str, Any] = CvtModel(config=__a ) model.to(__a ) model.eval() __lowercase : Optional[int] = model(__a ) __lowercase : Union[str, Any] = (self.image_size, self.image_size) __lowercase , __lowercase : Tuple = image_size[0], image_size[1] for i in 
range(len(self.depth ) ): __lowercase : Union[str, Any] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) __lowercase : Tuple = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def lowerCAmelCase ( self : Any , __a : Tuple , __a : Optional[int] , __a : str ) -> Optional[int]: """simple docstring""" __lowercase : Any = self.num_labels __lowercase : Tuple = CvtForImageClassification(__a ) model.to(__a ) model.eval() __lowercase : Optional[int] = model(__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : str ) -> Tuple: """simple docstring""" __lowercase : List[str] = self.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase : Any = config_and_inputs __lowercase : Tuple = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase ( __a , __a , unittest.TestCase ): '''simple docstring''' _A : Optional[int] = (CvtModel, CvtForImageClassification) if is_torch_available() else () _A : Dict = ( {'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification} if is_torch_available() else {} ) _A : List[str] = False _A : Optional[Any] = False _A : Optional[Any] = False _A : Tuple = False _A : List[str] = False def lowerCAmelCase ( self : int ) -> Dict: """simple docstring""" __lowercase : Any = CvtModelTester(self ) __lowercase : List[Any] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 ) def lowerCAmelCase ( self : Any ) -> Any: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCAmelCase ( self : Dict ) -> str: """simple docstring""" return @unittest.skip(reason="""Cvt does not output attentions""" ) def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" pass @unittest.skip(reason="""Cvt does not use inputs_embeds""" ) def lowerCAmelCase ( self : str ) -> List[str]: """simple docstring""" pass @unittest.skip(reason="""Cvt does not support input and output embeddings""" ) def lowerCAmelCase ( self : str ) -> List[str]: """simple docstring""" pass def lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" __lowercase , __lowercase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase : List[str] = model_class(__a ) __lowercase : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase : Any = [*signature.parameters.keys()] __lowercase : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __a ) def lowerCAmelCase ( self : Optional[Any] ) -> Dict: """simple docstring""" __lowercase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def lowerCAmelCase ( self : List[str] ) -> Dict: """simple docstring""" def check_hidden_states_output(__a : Tuple , __a : List[Any] , __a : 
Union[str, Any] ): __lowercase : Any = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): __lowercase : str = model(**self._prepare_for_class(__a , __a ) ) __lowercase : Any = outputs.hidden_states __lowercase : List[str] = len(self.model_tester.depth ) self.assertEqual(len(__a ) , __a ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) __lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase : Optional[int] = True check_hidden_states_output(__a , __a , __a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowercase : List[Any] = True check_hidden_states_output(__a , __a , __a ) def lowerCAmelCase ( self : List[Any] ) -> Optional[int]: """simple docstring""" __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCAmelCase ( self : Optional[Any] ) -> Dict: """simple docstring""" pass @slow def lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase : str = CvtModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def snake_case_ ( ): __lowercase : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def lowerCAmelCase ( self : Union[str, Any] ) -> Dict: """simple docstring""" __lowercase : List[str] = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__a ) __lowercase : List[str] = self.default_image_processor __lowercase : List[str] = prepare_img() __lowercase : List[Any] = image_processor(images=__a , return_tensors="""pt""" ).to(__a ) # forward pass with torch.no_grad(): __lowercase : Union[str, Any] = model(**__a ) # verify the logits __lowercase : Union[str, Any] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __a ) __lowercase : Optional[Any] = torch.tensor([0.9285, 0.9015, -0.3150] ).to(__a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
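# --- Added usage sketch (not part of the test suite above) ---------------------
# The integration test above runs CvtForImageClassification end to end; this is
# a minimal standalone version of the same inference path. The "microsoft/cvt-13"
# checkpoint name is an assumption: the test itself only loads
# CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0].
import torch
from PIL import Image
from transformers import AutoImageProcessor, CvtForImageClassification

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
processor = AutoImageProcessor.from_pretrained("microsoft/cvt-13")  # assumed checkpoint
model = CvtForImageClassification.from_pretrained("microsoft/cvt-13").eval()

with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits  # shape (1, 1000)
print(model.config.id2label[int(logits.argmax(-1))])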
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
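# --- Added usage sketch --------------------------------------------------------
# What the _LazyModule above buys you: importing the package is cheap, and each
# submodule is imported only on first attribute access. A hedged illustration,
# assuming transformers is installed with torch and vision extras available.
from transformers.models import poolformer

config = poolformer.PoolFormerConfig()       # triggers the configuration_poolformer import
model = poolformer.PoolFormerModel(config)   # triggers the modeling_poolformer import
print(type(model).__name__)                  # -> PoolFormerModel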
from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCamelCase : Tuple = logging.get_logger(__name__) class lowerCAmelCase ( __a ): '''simple docstring''' _A : Optional[int] = ['''audio_values''', '''audio_mask'''] def __init__( self : List[Any] , __a : Union[str, Any]=2048 , __a : Dict=1 , __a : Any=[16, 16] , __a : Union[str, Any]=128 , __a : Optional[Any]=44100 , __a : int=86 , __a : Any=2048 , __a : int=0.0 , **__a : Any , ) -> Tuple: """simple docstring""" super().__init__( feature_size=__a , sampling_rate=__a , padding_value=__a , **__a , ) __lowercase : Any = spectrogram_length __lowercase : Tuple = num_channels __lowercase : Dict = patch_size __lowercase : Any = feature_size // self.patch_size[1] __lowercase : Optional[int] = n_fft __lowercase : int = sampling_rate // hop_length_to_sampling_rate __lowercase : Optional[Any] = sampling_rate __lowercase : Dict = padding_value __lowercase : int = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__a , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=__a , norm="""slaney""" , mel_scale="""slaney""" , ).T def lowerCAmelCase ( self : Union[str, Any] , __a : np.array ) -> np.ndarray: """simple docstring""" __lowercase : List[Any] = spectrogram( __a , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , ) __lowercase : Union[str, Any] = log_spec[:, :-1] __lowercase : Optional[int] = log_spec - 20.0 __lowercase : List[Any] = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self : Tuple , __a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __a : Optional[Union[str, TensorType]] = None , __a : Optional[bool] = True , __a : Optional[int] = None , __a : bool = False , __a : bool = False , **__a : Optional[int] , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( """This feature extractor is set to support sampling rate""" F" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled" F" with {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. 
""" """Failing to do so can result in silent errors that might be hard to debug.""" ) __lowercase : Dict = isinstance(__a , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F"Only mono-channel audio is supported for input to {self}" ) __lowercase : List[str] = is_batched_numpy or ( isinstance(__a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __lowercase : Dict = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(__a , np.ndarray ): __lowercase : int = np.asarray(__a , dtype=np.floataa ) elif isinstance(__a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __lowercase : List[Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __lowercase : str = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis __lowercase : List[str] = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , __a ): __lowercase : Optional[Any] = [np.asarray(__a , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask __lowercase : Dict = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: __lowercase : List[Any] = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] __lowercase : List[Any] = np.array(__a ).astype(np.floataa ) # convert into correct format for padding __lowercase : List[Any] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch __lowercase : Optional[int] = np.ones([len(__a ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) __lowercase : List[str] = padded_audio_features * self.padding_value for i in range(len(__a ) ): __lowercase : Optional[Any] = audio_features[i] __lowercase : Optional[int] = feature # return as BatchFeature if return_attention_mask: __lowercase : Any = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask} else: __lowercase : Optional[Any] = {"""audio_values""": padded_audio_features} __lowercase : Dict = BatchFeature(data=__a , tensor_type=__a ) return encoded_inputs
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """
    Returns the prime factors of a positive integer in ascending order.

    >>> prime_factors(100)
    [2, 2, 5, 5]
    >>> prime_factors(97)
    [97]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class lowerCAmelCase ( __a ): '''simple docstring''' _A : Optional[Any] = (DPMSolverSDEScheduler,) _A : Dict = 10 def lowerCAmelCase ( self : Optional[int] , **__a : Dict ) -> Optional[int]: """simple docstring""" __lowercase : Any = { """num_train_timesteps""": 1100, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", """noise_sampler_seed""": 0, } config.update(**__a ) return config def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]: """simple docstring""" for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=__a ) def lowerCAmelCase ( self : Any ) -> Optional[int]: """simple docstring""" for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=__a , beta_end=__a ) def lowerCAmelCase ( self : str ) -> Optional[Any]: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=__a ) def lowerCAmelCase ( self : Dict ) -> Tuple: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__a ) def lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" __lowercase : Optional[int] = self.scheduler_classes[0] __lowercase : List[str] = self.get_scheduler_config() __lowercase : Any = scheduler_class(**__a ) scheduler.set_timesteps(self.num_inference_steps ) __lowercase : Optional[Any] = self.dummy_model() __lowercase : str = self.dummy_sample_deter * scheduler.init_noise_sigma __lowercase : Optional[Any] = sample.to(__a ) for i, t in enumerate(scheduler.timesteps ): __lowercase : Union[str, Any] = scheduler.scale_model_input(__a , __a ) __lowercase : Optional[Any] = model(__a , __a ) __lowercase : Optional[Any] = scheduler.step(__a , __a , __a ) __lowercase : str = output.prev_sample __lowercase : Optional[Any] = torch.sum(torch.abs(__a ) ) __lowercase : Union[str, Any] = torch.mean(torch.abs(__a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47821044921875 ) < 1E-2 assert abs(result_mean.item() - 0.2178705964565277 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59352111816406 ) < 1E-2 assert abs(result_mean.item() - 0.22342906892299652 ) < 1E-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3 def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __lowercase : Tuple = self.scheduler_classes[0] __lowercase : Dict = self.get_scheduler_config(prediction_type="""v_prediction""" ) __lowercase : int = scheduler_class(**__a ) scheduler.set_timesteps(self.num_inference_steps ) __lowercase : Optional[int] = self.dummy_model() __lowercase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma __lowercase : Dict = sample.to(__a ) for i, t in enumerate(scheduler.timesteps ): __lowercase : Dict = scheduler.scale_model_input(__a , __a ) __lowercase : Optional[int] = model(__a , __a ) __lowercase : Optional[int] = scheduler.step(__a , __a , __a ) __lowercase : int = output.prev_sample __lowercase : Optional[Any] = torch.sum(torch.abs(__a ) ) __lowercase : List[str] = torch.mean(torch.abs(__a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 124.77149200439453 ) < 1E-2 assert 
abs(result_mean.item() - 0.16226289014816284 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 128.1663360595703 ) < 1E-2 assert abs(result_mean.item() - 0.16688326001167297 ) < 1E-3 else: assert abs(result_sum.item() - 119.8487548828125 ) < 1E-2 assert abs(result_mean.item() - 0.1560530662536621 ) < 1E-3 def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]: """simple docstring""" __lowercase : Tuple = self.scheduler_classes[0] __lowercase : Dict = self.get_scheduler_config() __lowercase : Optional[int] = scheduler_class(**__a ) scheduler.set_timesteps(self.num_inference_steps , device=__a ) __lowercase : int = self.dummy_model() __lowercase : Optional[Any] = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma for t in scheduler.timesteps: __lowercase : int = scheduler.scale_model_input(__a , __a ) __lowercase : List[str] = model(__a , __a ) __lowercase : List[str] = scheduler.step(__a , __a , __a ) __lowercase : int = output.prev_sample __lowercase : List[Any] = torch.sum(torch.abs(__a ) ) __lowercase : Optional[Any] = torch.mean(torch.abs(__a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46957397460938 ) < 1E-2 assert abs(result_mean.item() - 0.21805934607982635 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59353637695312 ) < 1E-2 assert abs(result_mean.item() - 0.22342908382415771 ) < 1E-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3 def lowerCAmelCase ( self : Tuple ) -> Tuple: """simple docstring""" __lowercase : str = self.scheduler_classes[0] __lowercase : List[Any] = self.get_scheduler_config() __lowercase : Tuple = scheduler_class(**__a , use_karras_sigmas=__a ) scheduler.set_timesteps(self.num_inference_steps , device=__a ) __lowercase : List[str] = self.dummy_model() __lowercase : Optional[int] = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma __lowercase : str = sample.to(__a ) for t in scheduler.timesteps: __lowercase : List[Any] = scheduler.scale_model_input(__a , __a ) __lowercase : Optional[Any] = model(__a , __a ) __lowercase : Any = scheduler.step(__a , __a , __a ) __lowercase : Optional[Any] = output.prev_sample __lowercase : Any = torch.sum(torch.abs(__a ) ) __lowercase : Optional[Any] = torch.mean(torch.abs(__a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66974135742188 ) < 1E-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 177.63653564453125 ) < 1E-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2 else: assert abs(result_sum.item() - 170.3135223388672 ) < 1E-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
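# --- Added usage sketch --------------------------------------------------------
# The sampling loop the tests above exercise, outside the test harness. A sketch
# assuming diffusers with torchsde installed; the lambda is a stand-in noise
# predictor, not a trained model.
import torch
from diffusers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, noise_sampler_seed=0
)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
model = lambda x, t: torch.zeros_like(x)  # stand-in for a UNet noise predictor

for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample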
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class lowerCAmelCase : '''simple docstring''' _A : Optional[int] = BlenderbotSmallConfig _A : Optional[int] = {} _A : List[Any] = '''gelu''' def __init__( self : List[Any] , __a : int , __a : Any=13 , __a : int=7 , __a : List[str]=True , __a : Dict=False , __a : Dict=99 , __a : Optional[Any]=32 , __a : List[str]=2 , __a : Tuple=4 , __a : List[Any]=37 , __a : Union[str, Any]=0.1 , __a : Dict=0.1 , __a : Optional[Any]=20 , __a : Optional[Any]=2 , __a : List[str]=1 , __a : List[str]=0 , ) -> Optional[Any]: """simple docstring""" __lowercase : str = parent __lowercase : int = batch_size __lowercase : List[Any] = seq_length __lowercase : str = is_training __lowercase : List[str] = use_labels __lowercase : Union[str, Any] = vocab_size __lowercase : List[str] = hidden_size __lowercase : List[Any] = num_hidden_layers __lowercase : List[Any] = num_attention_heads __lowercase : int = intermediate_size __lowercase : List[str] = hidden_dropout_prob __lowercase : List[str] = attention_probs_dropout_prob __lowercase : int = max_position_embeddings __lowercase : int = eos_token_id __lowercase : Optional[int] = pad_token_id __lowercase : int = bos_token_id def lowerCAmelCase ( self : Dict ) -> Optional[Any]: """simple docstring""" __lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __lowercase : Tuple = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __lowercase : str = tf.concat([input_ids, eos_tensor] , axis=1 ) __lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase : List[str] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __lowercase : List[Any] = prepare_blenderbot_small_inputs_dict(__a , __a , __a ) return config, inputs_dict def lowerCAmelCase ( self : Union[str, Any] , __a : Optional[Any] , __a : Any ) -> Optional[Any]: """simple docstring""" __lowercase : Any = TFBlenderbotSmallModel(config=__a ).get_decoder() __lowercase : Any = inputs_dict["""input_ids"""] __lowercase : Optional[Any] = input_ids[:1, :] __lowercase : int = inputs_dict["""attention_mask"""][:1, :] __lowercase : Tuple = inputs_dict["""head_mask"""] __lowercase : Any = 1 # first forward pass __lowercase : Union[str, Any] = model(__a , attention_mask=__a , head_mask=__a , use_cache=__a ) __lowercase , __lowercase : 
Optional[int] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __lowercase : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) __lowercase : int = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and __lowercase : Optional[int] = tf.concat([input_ids, next_tokens] , axis=-1 ) __lowercase : List[str] = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) __lowercase : Tuple = model(__a , attention_mask=__a )[0] __lowercase : List[str] = model(__a , attention_mask=__a , past_key_values=__a )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice __lowercase : int = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) __lowercase : List[str] = output_from_no_past[:, -3:, random_slice_idx] __lowercase : Tuple = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__a , __a , rtol=1E-3 ) def snake_case_ ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Any=None , ): if attention_mask is None: __lowercase : Dict = tf.cast(tf.math.not_equal(lowerCAmelCase_ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: __lowercase : Optional[Any] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: __lowercase : int = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __lowercase : str = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __lowercase : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class lowerCAmelCase ( __a , __a , unittest.TestCase ): '''simple docstring''' _A : Any = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) _A : Any = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () _A : List[Any] = ( { '''conversational''': TFBlenderbotSmallForConditionalGeneration, '''feature-extraction''': TFBlenderbotSmallModel, '''summarization''': TFBlenderbotSmallForConditionalGeneration, '''text2text-generation''': TFBlenderbotSmallForConditionalGeneration, '''translation''': TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) _A : Optional[int] = True _A : int = False _A : str = False def lowerCAmelCase ( self : Optional[Any] ) -> Dict: """simple docstring""" __lowercase : List[str] = TFBlenderbotSmallModelTester(self ) __lowercase : Optional[Any] = ConfigTester(self , config_class=__a ) def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" self.config_tester.run_common_tests() def lowerCAmelCase ( self : List[str] ) -> List[str]: """simple docstring""" __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__a ) @require_tokenizers @require_tf class 
lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' _A : Optional[Any] = [ '''Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like ''' ''' i\'m going to throw up.\nand why is that?''' ] _A : Any = '''facebook/blenderbot_small-90M''' @cached_property def lowerCAmelCase ( self : List[Any] ) -> Optional[int]: """simple docstring""" return BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" ) @cached_property def lowerCAmelCase ( self : Optional[int] ) -> str: """simple docstring""" __lowercase : str = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def lowerCAmelCase ( self : Optional[int] ) -> str: """simple docstring""" __lowercase : str = self.tokenizer(self.src_text , return_tensors="""tf""" ) __lowercase : List[Any] = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__a , ) __lowercase : int = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__a )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
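# --- Added usage sketch --------------------------------------------------------
# Standalone version of the generation path the integration test checks. Note
# that the extracted text garbles the auto class name as TFAutoModelForSeqaSeqLM;
# the real transformers class is TFAutoModelForSeq2SeqLM, assumed here.
from transformers import BlenderbotSmallTokenizer, TFAutoModelForSeq2SeqLM

tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot_small-90M")

inputs = tokenizer(["Social anxiety\nWow, I am never shy. Do you have anxiety?"], return_tensors="tf")
generated = model.generate(inputs.input_ids, attention_mask=inputs.attention_mask, num_beams=2)
print(tokenizer.batch_decode(generated.numpy(), skip_special_tokens=True)[0])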
def abbr(a: str, b: str) -> bool:
    """
    Checks whether string `a` can be turned into string `b` by uppercasing some
    of its lowercase letters and deleting all remaining lowercase letters.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING lowerCamelCase : int = logging.get_logger(__name__) @add_end_docstrings(__a ) class lowerCAmelCase ( __a ): '''simple docstring''' def __init__( self : Tuple , **__a : Dict ) -> str: """simple docstring""" super().__init__(**__a ) requires_backends(self , """vision""" ) requires_backends(self , """torch""" ) if self.framework != "pt": raise ValueError(F"The {self.__class__} is only available in PyTorch." ) self.check_model_type(__a ) def lowerCAmelCase ( self : Any , **__a : List[str] ) -> List[Any]: """simple docstring""" __lowercase : str = {} __lowercase : List[Any] = {} __lowercase : int = {} # preprocess args if "points_per_batch" in kwargs: __lowercase : str = kwargs["""points_per_batch"""] if "points_per_crop" in kwargs: __lowercase : Union[str, Any] = kwargs["""points_per_crop"""] if "crops_n_layers" in kwargs: __lowercase : str = kwargs["""crops_n_layers"""] if "crop_overlap_ratio" in kwargs: __lowercase : str = kwargs["""crop_overlap_ratio"""] if "crop_n_points_downscale_factor" in kwargs: __lowercase : List[str] = kwargs["""crop_n_points_downscale_factor"""] # postprocess args if "pred_iou_thresh" in kwargs: __lowercase : List[str] = kwargs["""pred_iou_thresh"""] if "stability_score_offset" in kwargs: __lowercase : Optional[int] = kwargs["""stability_score_offset"""] if "mask_threshold" in kwargs: __lowercase : str = kwargs["""mask_threshold"""] if "stability_score_thresh" in kwargs: __lowercase : List[Any] = kwargs["""stability_score_thresh"""] if "crops_nms_thresh" in kwargs: __lowercase : List[str] = kwargs["""crops_nms_thresh"""] if "output_rle_mask" in kwargs: __lowercase : Optional[int] = kwargs["""output_rle_mask"""] if "output_bboxes_mask" in kwargs: __lowercase : Union[str, Any] = kwargs["""output_bboxes_mask"""] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__( self : int , __a : int , *__a : List[str] , __a : Dict=None , __a : int=None , **__a : Tuple ) -> Optional[int]: """simple docstring""" return super().__call__(__a , *__a , num_workers=__a , batch_size=__a , **__a ) def lowerCAmelCase ( self : int , __a : Tuple , __a : List[str]=64 , __a : int = 0 , __a : float = 512 / 1500 , __a : Optional[int] = 32 , __a : Optional[int] = 1 , ) -> Tuple: """simple docstring""" __lowercase : str = load_image(__a ) __lowercase : str = self.image_processor.size["""longest_edge"""] __lowercase , __lowercase , __lowercase , __lowercase : List[Any] = self.image_processor.generate_crop_boxes( __a , __a , __a , __a , __a , __a ) __lowercase : List[str] = self.image_processor(images=__a , return_tensors="""pt""" ) with self.device_placement(): if self.framework == "pt": __lowercase : Union[str, Any] = self.get_inference_context() with inference_context(): __lowercase : int = self._ensure_tensor_on_device(__a , device=self.device ) __lowercase : Union[str, Any] = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) ) __lowercase : int = image_embeddings __lowercase : List[Any] = grid_points.shape[1] __lowercase : Optional[int] = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( """Cannot have points_per_batch<=0. 
Must be >=1 to returned batched outputs. """ """To return all points at once, set points_per_batch to None""" ) for i in range(0 , __a , __a ): __lowercase : List[str] = grid_points[:, i : i + points_per_batch, :, :] __lowercase : str = input_labels[:, i : i + points_per_batch] __lowercase : Optional[int] = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def lowerCAmelCase ( self : Optional[int] , __a : Dict , __a : List[Any]=0.88 , __a : Any=0.95 , __a : Any=0 , __a : List[Any]=1 , ) -> Optional[int]: """simple docstring""" __lowercase : Tuple = model_inputs.pop("""input_boxes""" ) __lowercase : Union[str, Any] = model_inputs.pop("""is_last""" ) __lowercase : str = model_inputs.pop("""original_sizes""" ).tolist() __lowercase : int = model_inputs.pop("""reshaped_input_sizes""" ).tolist() __lowercase : Optional[Any] = self.model(**__a ) # post processing happens here in order to avoid CPU GPU copies of ALL the masks __lowercase : str = model_outputs["""pred_masks"""] __lowercase : str = self.image_processor.post_process_masks( __a , __a , __a , __a , binarize=__a ) __lowercase : Optional[Any] = model_outputs["""iou_scores"""] __lowercase , __lowercase , __lowercase : int = self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __a , __a , __a , __a , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def lowerCAmelCase ( self : Optional[int] , __a : Any , __a : List[str]=False , __a : str=False , __a : Any=0.7 , ) -> Any: """simple docstring""" __lowercase : Any = [] __lowercase : Tuple = [] __lowercase : Union[str, Any] = [] for model_output in model_outputs: all_scores.append(model_output.pop("""iou_scores""" ) ) all_masks.extend(model_output.pop("""masks""" ) ) all_boxes.append(model_output.pop("""boxes""" ) ) __lowercase : List[Any] = torch.cat(__a ) __lowercase : Tuple = torch.cat(__a ) __lowercase , __lowercase , __lowercase , __lowercase : Dict = self.image_processor.post_process_for_mask_generation( __a , __a , __a , __a ) __lowercase : Optional[Any] = defaultdict(__a ) for output in model_outputs: for k, v in output.items(): extra[k].append(__a ) __lowercase : Optional[Any] = {} if output_rle_mask: __lowercase : List[str] = rle_mask if output_bboxes_mask: __lowercase : Any = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
from scipy.stats import spearmanr import datasets lowerCamelCase : List[str] = ''' The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. ''' lowerCamelCase : List[str] = ''' Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {\'spearmanr\': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results[\'spearmanr\']) -0.7 >>> print(round(results[\'spearmanr_pvalue\'], 2)) 0.19 ''' lowerCamelCase : Union[str, Any] = r'''\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase ( datasets.Metric ): '''simple docstring''' def lowerCAmelCase ( self : Tuple ) -> Any: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""float""" ), """references""": datasets.Value("""float""" ), } ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , ) def lowerCAmelCase ( self : List[Any] , __a : str , __a : Any , __a : Optional[int]=False ) -> List[str]: """simple docstring""" __lowercase : Optional[Any] = spearmanr(__a , __a ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
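# --- Added cross-check ---------------------------------------------------------
# The metric is a thin wrapper over scipy; this reproduces the docstring example
# directly against scipy.stats.spearmanr.
from scipy.stats import spearmanr as scipy_spearmanr

rho, pvalue = scipy_spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
assert round(rho, 1) == -0.7  # matches the docstring example above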
import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def snake_case_ ( lowerCAmelCase_ : Dict ): __lowercase : Any = tmp_path / """file.csv""" __lowercase : Dict = textwrap.dedent( """\ header1,header2 1,2 10,20 """ ) with open(lowerCAmelCase_ , """w""" ) as f: f.write(lowerCAmelCase_ ) return str(lowerCAmelCase_ ) @pytest.fixture def snake_case_ ( lowerCAmelCase_ : Optional[int] ): __lowercase : List[Any] = tmp_path / """malformed_file.csv""" __lowercase : Union[str, Any] = textwrap.dedent( """\ header1,header2 1,2 10,20, """ ) with open(lowerCAmelCase_ , """w""" ) as f: f.write(lowerCAmelCase_ ) return str(lowerCAmelCase_ ) @pytest.fixture def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : int ): __lowercase : str = tmp_path / """csv_with_image.csv""" __lowercase : Dict = textwrap.dedent( F"\\n image\n {image_file}\n " ) with open(lowerCAmelCase_ , """w""" ) as f: f.write(lowerCAmelCase_ ) return str(lowerCAmelCase_ ) @pytest.fixture def snake_case_ ( lowerCAmelCase_ : Union[str, Any] ): __lowercase : Optional[Any] = tmp_path / """csv_with_label.csv""" __lowercase : Optional[int] = textwrap.dedent( """\ label good bad good """ ) with open(lowerCAmelCase_ , """w""" ) as f: f.write(lowerCAmelCase_ ) return str(lowerCAmelCase_ ) @pytest.fixture def snake_case_ ( lowerCAmelCase_ : Dict ): __lowercase : str = tmp_path / """csv_with_int_list.csv""" __lowercase : Union[str, Any] = textwrap.dedent( """\ int_list 1 2 3 4 5 6 7 8 9 """ ) with open(lowerCAmelCase_ , """w""" ) as f: f.write(lowerCAmelCase_ ) return str(lowerCAmelCase_ ) def snake_case_ ( lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Any ): __lowercase : Any = Csv() __lowercase : List[str] = csv._generate_tables([[csv_file, malformed_csv_file]] ) with pytest.raises(lowerCAmelCase_ , match="""Error tokenizing data""" ): for _ in generator: pass assert any( record.levelname == """ERROR""" and """Failed to read file""" in record.message and os.path.basename(lowerCAmelCase_ ) in record.message for record in caplog.records ) @require_pil def snake_case_ ( lowerCAmelCase_ : str ): with open(lowerCAmelCase_ , encoding="""utf-8""" ) as f: __lowercase : Optional[Any] = f.read().splitlines()[1] __lowercase : Dict = Csv(encoding="""utf-8""" , features=Features({"""image""": Image()} ) ) __lowercase : Dict = csv._generate_tables([[csv_file_with_image]] ) __lowercase : str = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field("""image""" ).type == Image()() __lowercase : List[Any] = pa_table.to_pydict()["""image"""] assert generated_content == [{"path": image_file, "bytes": None}] def snake_case_ ( lowerCAmelCase_ : int ): with open(lowerCAmelCase_ , encoding="""utf-8""" ) as f: __lowercase : List[Any] = f.read().splitlines()[1:] __lowercase : Tuple = Csv(encoding="""utf-8""" , features=Features({"""label""": ClassLabel(names=["""good""", """bad"""] )} ) ) __lowercase : Optional[Any] = csv._generate_tables([[csv_file_with_label]] ) __lowercase : Any = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field("""label""" ).type == ClassLabel(names=["""good""", """bad"""] )() __lowercase : Tuple = pa_table.to_pydict()["""label"""] assert generated_content == [ClassLabel(names=["""good""", """bad"""] ).straint(lowerCAmelCase_ ) for label in labels] def snake_case_ ( lowerCAmelCase_ : Any ): __lowercase : 
Tuple = Csv(encoding="""utf-8""" , sep=""",""" , converters={"""int_list""": lambda x : [int(i ) for i in x.split()]} ) __lowercase : int = csv._generate_tables([[csv_file_with_int_list]] ) __lowercase : int = pa.concat_tables([table for _, table in generator] ) assert pa.types.is_list(pa_table.schema.field("""int_list""" ).type ) __lowercase : Union[str, Any] = pa_table.to_pydict()["""int_list"""] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
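# --- Added cross-check ---------------------------------------------------------
# The label test above depends on ClassLabel's string-to-int mapping; the
# extracted text garbles the method name as "straint", but in datasets it is
# ClassLabel.str2int:
from datasets import ClassLabel

labels = ClassLabel(names=["good", "bad"])
assert [labels.str2int(label) for label in ["good", "bad", "good"]] == [0, 1, 0]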
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
import gc
import unittest

from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging lowerCamelCase : Optional[Any] = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCAmelCase ( __a ): '''simple docstring''' _A : List[str] = ['''pixel_values'''] def __init__( self : Any , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : bool = True , __a : Dict[str, int] = None , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : bool = True , **__a : str , ) -> None: """simple docstring""" super().__init__(**__a ) __lowercase : Dict = size if size is not None else {"""shortest_edge""": 224} __lowercase : Union[str, Any] = get_size_dict(__a , default_to_square=__a ) __lowercase : int = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __lowercase : Any = get_size_dict(__a , default_to_square=__a , param_name="""crop_size""" ) __lowercase : Optional[int] = do_resize __lowercase : Union[str, Any] = size __lowercase : List[Any] = resample __lowercase : Any = do_center_crop __lowercase : Dict = crop_size __lowercase : int = do_rescale __lowercase : Tuple = rescale_factor __lowercase : List[Any] = do_normalize __lowercase : Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __lowercase : int = image_std if image_std is not None else OPENAI_CLIP_STD __lowercase : Union[str, Any] = do_convert_rgb def lowerCAmelCase ( self : Union[str, Any] , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[Any] , ) -> np.ndarray: """simple docstring""" __lowercase : Dict = get_size_dict(__a , default_to_square=__a ) if "shortest_edge" not in size: raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" ) __lowercase : str = get_resize_output_image_size(__a , size=size["""shortest_edge"""] , default_to_square=__a ) return resize(__a , size=__a , resample=__a , data_format=__a , **__a ) def lowerCAmelCase ( self : Tuple , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Any , ) -> np.ndarray: """simple docstring""" __lowercase : Tuple = get_size_dict(__a ) if "height" not in size or "width" not in size: raise ValueError(F"The `size` parameter must contain the keys (height, width). 
Got {size.keys()}" ) return center_crop(__a , size=(size["""height"""], size["""width"""]) , data_format=__a , **__a ) def lowerCAmelCase ( self : Tuple , __a : np.ndarray , __a : Union[int, float] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[Any] , ) -> List[str]: """simple docstring""" return rescale(__a , scale=__a , data_format=__a , **__a ) def lowerCAmelCase ( self : Optional[int] , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[str] , ) -> np.ndarray: """simple docstring""" return normalize(__a , mean=__a , std=__a , data_format=__a , **__a ) def lowerCAmelCase ( self : Optional[int] , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : int = None , __a : bool = None , __a : float = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : bool = None , __a : Optional[Union[str, TensorType]] = None , __a : Optional[ChannelDimension] = ChannelDimension.FIRST , **__a : List[Any] , ) -> PIL.Image.Image: """simple docstring""" __lowercase : List[Any] = do_resize if do_resize is not None else self.do_resize __lowercase : Dict = size if size is not None else self.size __lowercase : Tuple = get_size_dict(__a , param_name="""size""" , default_to_square=__a ) __lowercase : int = resample if resample is not None else self.resample __lowercase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop __lowercase : List[Any] = crop_size if crop_size is not None else self.crop_size __lowercase : List[str] = get_size_dict(__a , param_name="""crop_size""" , default_to_square=__a ) __lowercase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __lowercase : str = rescale_factor if rescale_factor is not None else self.rescale_factor __lowercase : Dict = do_normalize if do_normalize is not None else self.do_normalize __lowercase : Tuple = image_mean if image_mean is not None else self.image_mean __lowercase : str = image_std if image_std is not None else self.image_std __lowercase : str = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __lowercase : Union[str, Any] = make_list_of_images(__a ) if not valid_images(__a ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __lowercase : Union[str, Any] = [convert_to_rgb(__a ) for image in images] # All transformations expect numpy arrays. 
__lowercase : Any = [to_numpy_array(__a ) for image in images] if do_resize: __lowercase : str = [self.resize(image=__a , size=__a , resample=__a ) for image in images] if do_center_crop: __lowercase : str = [self.center_crop(image=__a , size=__a ) for image in images] if do_rescale: __lowercase : Dict = [self.rescale(image=__a , scale=__a ) for image in images] if do_normalize: __lowercase : Optional[Any] = [self.normalize(image=__a , mean=__a , std=__a ) for image in images] __lowercase : Any = [to_channel_dimension_format(__a , __a ) for image in images] __lowercase : Optional[int] = {"""pixel_values""": images} return BatchFeature(data=__a , tensor_type=__a )
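# --- Added usage sketch --------------------------------------------------------
# The defaults above (shortest_edge 224 resize, 224x224 center crop,
# OPENAI_CLIP_MEAN/STD, RGB conversion) match transformers' CLIPImageProcessor,
# which this example assumes.
from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor()
image = Image.new("RGB", (640, 480))
batch = processor(images=image, return_tensors="pt")
print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])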
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisble by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
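# --- Added usage sketch --------------------------------------------------------
# _rope_scaling_validation accepts exactly a two-field dict whose type is
# "linear" or "dynamic" and whose factor is a float > 1; anything else raises
# ValueError. A quick demonstration, assuming a transformers version that ships
# this config:
from transformers import GPTNeoXConfig

config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
print(config.rope_scaling)  # {'type': 'linear', 'factor': 2.0}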
import argparse import os import torch from transformers import FlavaImageCodebook, FlavaImageCodebookConfig def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str ): __lowercase : Tuple = s.rsplit(lowerCAmelCase_ , lowerCAmelCase_ ) return new.join(lowerCAmelCase_ ) def snake_case_ ( lowerCAmelCase_ : List[Any] ): # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() ) def snake_case_ ( lowerCAmelCase_ : int ): __lowercase : List[str] = {} __lowercase : Tuple = ["""group_1""", """group_2""", """group_3""", """group_4"""] for key, value in state_dict.items(): for group_key in group_keys: if group_key in key: __lowercase : List[str] = key.replace(F"{group_key}." , F"{group_key}.group." ) if "res_path" in key: __lowercase : List[Any] = key.replace("""res_path.""" , """res_path.path.""" ) if key.endswith(""".w""" ): __lowercase : Union[str, Any] = rreplace(lowerCAmelCase_ , """.w""" , """.weight""" , 1 ) if key.endswith(""".b""" ): __lowercase : Tuple = rreplace(lowerCAmelCase_ , """.b""" , """.bias""" , 1 ) __lowercase : Dict = value.float() return upgrade @torch.no_grad() def snake_case_ ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Tuple=True ): from dall_e import Encoder __lowercase : Any = Encoder() if os.path.exists(lowerCAmelCase_ ): __lowercase : List[Any] = torch.load(lowerCAmelCase_ ) else: __lowercase : List[Any] = torch.hub.load_state_dict_from_url(lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): __lowercase : int = ckpt.state_dict() encoder.load_state_dict(lowerCAmelCase_ ) if config_path is not None: __lowercase : Optional[int] = FlavaImageCodebookConfig.from_pretrained(lowerCAmelCase_ ) else: __lowercase : List[str] = FlavaImageCodebookConfig() __lowercase : Optional[Any] = FlavaImageCodebook(lowerCAmelCase_ ).eval() __lowercase : List[Any] = encoder.state_dict() __lowercase : Union[str, Any] = upgrade_state_dict(lowerCAmelCase_ ) hf_model.load_state_dict(lowerCAmelCase_ ) __lowercase : Dict = hf_model.state_dict() __lowercase : Tuple = count_parameters(lowerCAmelCase_ ) __lowercase : Tuple = count_parameters(lowerCAmelCase_ ) assert torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-3 ) if save_checkpoint: hf_model.save_pretrained(lowerCAmelCase_ ) else: return hf_state_dict if __name__ == "__main__": lowerCamelCase : Dict = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') lowerCamelCase : Union[str, Any] = parser.parse_args() convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
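# --- Added worked example ------------------------------------------------------
# The first helper above (rreplace in the original FLAVA conversion script,
# mangled to snake_case_ here) replaces only the last `occurrence` matches,
# which is what lets the converter rename trailing ".w"/".b" suffixes without
# touching earlier ones:
def rreplace(s, old, new, occurrence):
    li = s.rsplit(old, occurrence)
    return new.join(li)

assert rreplace("blocks.w.attn.w", ".w", ".weight", 1) == "blocks.w.attn.weight"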
import os import pickle import unittest from transformers import AutoTokenizer from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.bert_japanese.tokenization_bert_japanese import ( VOCAB_FILES_NAMES, BertJapaneseTokenizer, CharacterTokenizer, JumanppTokenizer, MecabTokenizer, SudachiTokenizer, WordpieceTokenizer, ) from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi from ...test_tokenization_common import TokenizerTesterMixin @custom_tokenizers class lowerCAmelCase ( __a , unittest.TestCase ): '''simple docstring''' _A : Tuple = BertJapaneseTokenizer _A : Optional[Any] = False _A : Optional[int] = True def lowerCAmelCase ( self : int ) -> Optional[Any]: """simple docstring""" super().setUp() __lowercase : Dict = [ """[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは""", """世界""", """##世界""", """、""", """##、""", """。""", """##。""", ] __lowercase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def lowerCAmelCase ( self : str , __a : str ) -> Optional[int]: """simple docstring""" __lowercase : str = """こんにちは、世界。 \nこんばんは、世界。""" __lowercase : int = """こんにちは 、 世界 。 こんばんは 、 世界 。""" return input_text, output_text def lowerCAmelCase ( self : int , __a : Optional[int] ) -> Optional[Any]: """simple docstring""" __lowercase , __lowercase : str = self.get_input_output_texts(__a ) __lowercase : Union[str, Any] = tokenizer.encode(__a , add_special_tokens=__a ) __lowercase : int = tokenizer.decode(__a , clean_up_tokenization_spaces=__a ) return text, ids def lowerCAmelCase ( self : Optional[int] ) -> str: """simple docstring""" pass # TODO add if relevant def lowerCAmelCase ( self : Union[str, Any] ) -> str: """simple docstring""" pass # TODO add if relevant def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" pass # TODO add if relevant def lowerCAmelCase ( self : Any ) -> int: """simple docstring""" __lowercase : Tuple = self.tokenizer_class(self.vocab_file ) __lowercase : Any = tokenizer.tokenize("""こんにちは、世界。\nこんばんは、世界。""" ) self.assertListEqual(__a , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]: """simple docstring""" __lowercase : Dict = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""mecab""" ) self.assertIsNotNone(__a ) __lowercase : Union[str, Any] = """こんにちは、世界。\nこんばんは、世界。""" __lowercase : Dict = tokenizer.tokenize(__a ) self.assertListEqual(__a , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) __lowercase : Dict = os.path.join(self.tmpdirname , """tokenizer.bin""" ) with open(__a , """wb""" ) as handle: pickle.dump(__a , __a ) with open(__a , """rb""" ) as handle: __lowercase : int = pickle.load(__a ) __lowercase : List[Any] = tokenizer_new.tokenize(__a ) self.assertListEqual(__a , __a ) def lowerCAmelCase ( self : Any ) -> Tuple: """simple docstring""" __lowercase : Any = MecabTokenizer(mecab_dic="""ipadic""" ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 
""" ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , ) def lowerCAmelCase ( self : Union[str, Any] ) -> int: """simple docstring""" try: __lowercase : List[Any] = MecabTokenizer(mecab_dic="""unidic_lite""" ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , ) def lowerCAmelCase ( self : int ) -> List[str]: """simple docstring""" try: __lowercase : Any = MecabTokenizer(mecab_dic="""unidic""" ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , ) def lowerCAmelCase ( self : Dict ) -> Tuple: """simple docstring""" __lowercase : int = MecabTokenizer(do_lower_case=__a , mecab_dic="""ipadic""" ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , ) def lowerCAmelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" try: __lowercase : Union[str, Any] = MecabTokenizer( do_lower_case=__a , normalize_text=__a , mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""" ) except RuntimeError: # if dict doesn't exist in the system, previous code raises this error. return self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , ) def lowerCAmelCase ( self : List[Any] ) -> Any: """simple docstring""" __lowercase : int = MecabTokenizer(normalize_text=__a , mecab_dic="""ipadic""" ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] , ) @require_sudachi def lowerCAmelCase ( self : Dict ) -> Optional[int]: """simple docstring""" __lowercase : str = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""sudachi""" ) self.assertIsNotNone(__a ) __lowercase : Union[str, Any] = """こんにちは、世界。\nこんばんは、世界。""" __lowercase : Optional[Any] = tokenizer.tokenize(__a ) self.assertListEqual(__a , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) __lowercase : Optional[Any] = os.path.join(self.tmpdirname , """tokenizer.bin""" ) with open(__a , """wb""" ) as handle: pickle.dump(__a , __a ) with open(__a , """rb""" ) as handle: __lowercase : Tuple = pickle.load(__a ) __lowercase : Dict = tokenizer_new.tokenize(__a ) self.assertListEqual(__a , __a ) @require_sudachi def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" __lowercase : List[str] = SudachiTokenizer(sudachi_dict_type="""core""" ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , ) @require_sudachi def lowerCAmelCase ( self : List[Any] ) -> Dict: """simple docstring""" __lowercase : List[str] = 
SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""A""" ) self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国""", """人""", """参政""", """権"""] ) @require_sudachi def lowerCAmelCase ( self : Tuple ) -> Dict: """simple docstring""" __lowercase : int = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""B""" ) self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人""", """参政権"""] ) @require_sudachi def lowerCAmelCase ( self : Optional[Any] ) -> Dict: """simple docstring""" __lowercase : Union[str, Any] = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""C""" ) self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人参政権"""] ) @require_sudachi def lowerCAmelCase ( self : Union[str, Any] ) -> Any: """simple docstring""" __lowercase : Union[str, Any] = SudachiTokenizer(do_lower_case=__a , sudachi_dict_type="""core""" ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , ) @require_sudachi def lowerCAmelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" __lowercase : List[str] = SudachiTokenizer(normalize_text=__a , sudachi_dict_type="""core""" ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] , ) @require_sudachi def lowerCAmelCase ( self : Dict ) -> Optional[int]: """simple docstring""" __lowercase : Optional[int] = SudachiTokenizer(trim_whitespace=__a , sudachi_dict_type="""core""" ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , ) @require_jumanpp def lowerCAmelCase ( self : Tuple ) -> Tuple: """simple docstring""" __lowercase : Any = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""jumanpp""" ) self.assertIsNotNone(__a ) __lowercase : Any = """こんにちは、世界。\nこんばんは、世界。""" __lowercase : List[Any] = tokenizer.tokenize(__a ) self.assertListEqual(__a , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) __lowercase : Dict = os.path.join(self.tmpdirname , """tokenizer.bin""" ) with open(__a , """wb""" ) as handle: pickle.dump(__a , __a ) with open(__a , """rb""" ) as handle: __lowercase : List[Any] = pickle.load(__a ) __lowercase : Any = tokenizer_new.tokenize(__a ) self.assertListEqual(__a , __a ) @require_jumanpp def lowerCAmelCase ( self : Optional[Any] ) -> List[str]: """simple docstring""" __lowercase : Optional[Any] = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , ) @require_jumanpp def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" __lowercase : List[Any] = JumanppTokenizer(do_lower_case=__a ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", 
"""ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , ) @require_jumanpp def lowerCAmelCase ( self : List[Any] ) -> Any: """simple docstring""" __lowercase : int = JumanppTokenizer(normalize_text=__a ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , ) @require_jumanpp def lowerCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" __lowercase : Optional[Any] = JumanppTokenizer(trim_whitespace=__a ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] , ) @require_jumanpp def lowerCAmelCase ( self : List[Any] ) -> List[str]: """simple docstring""" __lowercase : int = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""" ) , ["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] , ) def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" __lowercase : Optional[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""] __lowercase : int = {} for i, token in enumerate(__a ): __lowercase : Dict = i __lowercase : int = WordpieceTokenizer(vocab=__a , unk_token="""[UNK]""" ) self.assertListEqual(tokenizer.tokenize("""""" ) , [] ) self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こんにちは"""] ) self.assertListEqual(tokenizer.tokenize("""こんばんは""" ) , ["""こん""", """##ばんは"""] ) self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""" ) , ["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""] ) def lowerCAmelCase ( self : str ) -> Any: """simple docstring""" __lowercase : str = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""" ) __lowercase : Optional[Any] = tokenizer.subword_tokenizer __lowercase : str = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""" ) self.assertListEqual(__a , ["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""] ) __lowercase : Any = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""" ) self.assertListEqual(__a , ["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""] ) def lowerCAmelCase ( self : Optional[Any] ) -> int: """simple docstring""" __lowercase : Optional[Any] = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""" ) __lowercase : Optional[Any] = tokenizer.encode("""ありがとう。""" , add_special_tokens=__a ) __lowercase : List[Any] = tokenizer.encode("""どういたしまして。""" , add_special_tokens=__a ) __lowercase : Any = tokenizer.build_inputs_with_special_tokens(__a ) __lowercase : Any = tokenizer.build_inputs_with_special_tokens(__a , __a ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class lowerCAmelCase ( __a , unittest.TestCase ): '''simple docstring''' _A : Any = BertJapaneseTokenizer _A : Tuple = False def lowerCAmelCase ( self : Optional[Any] ) -> Any: """simple docstring""" super().setUp() __lowercase : 
Tuple = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""] __lowercase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def lowerCAmelCase ( self : Dict , **__a : str ) -> Optional[int]: """simple docstring""" return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="""character""" , **__a ) def lowerCAmelCase ( self : Dict , __a : Optional[int] ) -> List[Any]: """simple docstring""" __lowercase : Any = """こんにちは、世界。 \nこんばんは、世界。""" __lowercase : Dict = """こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。""" return input_text, output_text def lowerCAmelCase ( self : Tuple ) -> Optional[int]: """simple docstring""" pass # TODO add if relevant def lowerCAmelCase ( self : Optional[Any] ) -> str: """simple docstring""" pass # TODO add if relevant def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" pass # TODO add if relevant def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" __lowercase : Any = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="""character""" ) __lowercase : Optional[Any] = tokenizer.tokenize("""こんにちは、世界。 \nこんばんは、世界。""" ) self.assertListEqual( __a , ["""こ""", """ん""", """に""", """ち""", """は""", """、""", """世""", """界""", """。""", """こ""", """ん""", """ば""", """ん""", """は""", """、""", """世""", """界""", """。"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__a ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] ) def lowerCAmelCase ( self : Tuple ) -> Tuple: """simple docstring""" __lowercase : Optional[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""] __lowercase : Dict = {} for i, token in enumerate(__a ): __lowercase : List[str] = i __lowercase : Tuple = CharacterTokenizer(vocab=__a , unk_token="""[UNK]""" ) self.assertListEqual(tokenizer.tokenize("""""" ) , [] ) self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こ""", """ん""", """に""", """ち""", """は"""] ) self.assertListEqual(tokenizer.tokenize("""こんにちほ""" ) , ["""こ""", """ん""", """に""", """ち""", """[UNK]"""] ) def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" __lowercase : List[Any] = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""" ) __lowercase : Optional[int] = tokenizer.encode("""ありがとう。""" , add_special_tokens=__a ) __lowercase : Optional[Any] = tokenizer.encode("""どういたしまして。""" , add_special_tokens=__a ) __lowercase : Any = tokenizer.build_inputs_with_special_tokens(__a ) __lowercase : Dict = tokenizer.build_inputs_with_special_tokens(__a , __a ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" __lowercase : List[Any] = """cl-tohoku/bert-base-japanese""" __lowercase : Tuple = AutoTokenizer.from_pretrained(__a ) self.assertIsInstance(__a , __a ) class lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase ( self : Any ) -> Tuple: """simple docstring""" __lowercase : Union[str, Any] = 
"""cl-tohoku/bert-base-japanese""" with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm: BertTokenizer.from_pretrained(__a ) self.assertTrue( cm.records[0].message.startswith( """The tokenizer class you load from this checkpoint is not the same type as the class this function""" """ is called from.""" ) ) __lowercase : Tuple = """bert-base-cased""" with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm: BertJapaneseTokenizer.from_pretrained(__a ) self.assertTrue( cm.records[0].message.startswith( """The tokenizer class you load from this checkpoint is not the same type as the class this function""" """ is called from.""" ) )
import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging lowerCamelCase : Tuple = logging.get_logger(__name__) logging.set_verbosity_info() def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : str ): if "xprophetnet" in prophetnet_checkpoint_path: __lowercase : List[str] = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase_ ) __lowercase , __lowercase : int = XLMProphetNetForConditionalGeneration.from_pretrained( lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ ) else: __lowercase : List[Any] = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase_ ) __lowercase , __lowercase : Optional[Any] = ProphetNetForConditionalGeneration.from_pretrained( lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ ) __lowercase : List[str] = ["""key_proj""", """value_proj""", """query_proj"""] __lowercase : Optional[int] = { """self_attn""": """ngram_self_attn""", """cross_attn""": """encoder_attn""", """cross_attn_layer_norm""": """encoder_attn_layer_norm""", """feed_forward_layer_norm""": """final_layer_norm""", """feed_forward""": """""", """intermediate""": """fc1""", """output""": """fc2""", """key_proj""": """k_proj""", """query_proj""": """q_proj""", """value_proj""": """v_proj""", """word_embeddings""": """embed_tokens""", """embeddings_layer_norm""": """emb_layer_norm""", """relative_pos_embeddings""": """relative_linear""", """ngram_embeddings""": """ngram_input_embed""", """position_embeddings""": """embed_positions""", } for key in loading_info["missing_keys"]: __lowercase : Tuple = key.split(""".""" ) if attributes[0] == "lm_head": __lowercase : str = prophet __lowercase : List[str] = prophet_old else: __lowercase : Tuple = prophet.prophetnet __lowercase : Union[str, Any] = prophet_old.model __lowercase : Optional[Any] = False for attribute in attributes: if attribute in mapping: __lowercase : Optional[int] = mapping[attribute] if not hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) > 0: __lowercase : str = attribute elif hasattr(lowerCAmelCase_ , lowerCAmelCase_ ): __lowercase : List[Any] = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" __lowercase : Any = old_model.weight logger.info(F"{attribute} is initialized." ) __lowercase : Any = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
__lowercase : Dict = old_model.bias logger.info(F"{attribute} is initialized" ) __lowercase : int = True break elif attribute in special_keys and hasattr(lowerCAmelCase_ , """in_proj_weight""" ): __lowercase : Dict = old_model.in_proj_weight.shape[0] // 3 __lowercase : Tuple = getattr(lowerCAmelCase_ , lowerCAmelCase_ ) param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": __lowercase : Union[str, Any] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) __lowercase : int = nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": __lowercase : Any = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) __lowercase : List[Any] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": __lowercase : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) __lowercase : int = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) __lowercase : int = True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings." __lowercase : Optional[Any] = nn.Parameter(old_model.embed_positions.weight[:512, :] ) __lowercase : int = True break if attribute.isdigit(): __lowercase : Tuple = model[int(lowerCAmelCase_ )] __lowercase : int = old_model[int(lowerCAmelCase_ )] else: __lowercase : Union[str, Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ ) if old_attribute == "": __lowercase : int = old_model else: if not hasattr(lowerCAmelCase_ , lowerCAmelCase_ ): raise ValueError(F"{old_model} does not have {old_attribute}" ) __lowercase : List[Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ ) if not is_key_init: raise ValueError(F"{key} was not correctly initialized!" ) print(F"Saving model to {pytorch_dump_folder_path}" ) prophet.save_pretrained(lowerCAmelCase_ ) if __name__ == "__main__": lowerCamelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCamelCase : Any = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
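# The query/key/value slicing above works because fairseq stores the three
# projections as one fused `in_proj_weight` of shape (3 * embed_dim, embed_dim).
# A self-contained sketch of the same chunking, using made-up tensors:
import torch
from torch import nn

embed_dim = 8
in_proj_weight = torch.randn(3 * embed_dim, embed_dim)  # rows: [query; key; value]
in_proj_bias = torch.randn(3 * embed_dim)

query_weight = nn.Parameter(in_proj_weight[:embed_dim, :])
key_weight = nn.Parameter(in_proj_weight[embed_dim : 2 * embed_dim, :])
value_weight = nn.Parameter(in_proj_weight[2 * embed_dim :, :])

query_bias = nn.Parameter(in_proj_bias[:embed_dim])
key_bias = nn.Parameter(in_proj_bias[embed_dim : 2 * embed_dim])
value_bias = nn.Parameter(in_proj_bias[2 * embed_dim :])

assert query_weight.shape == key_weight.shape == value_weight.shape == (embed_dim, embed_dim)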
import logging
from dataclasses import dataclass, field
from typing import Optional

from seqaseq_trainer import arg_to_scheduler

from transformers import TrainingArguments


lowerCamelCase : List[str] = logging.getLogger(__name__)


@dataclass
class lowerCAmelCase ( __a ):
    '''simple docstring'''

    _A : Optional[float] = field(
        default=0.0 , metadata={'''help''': '''The label smoothing epsilon to apply (if not zero).'''} )
    _A : bool = field(default=__a , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
    _A : bool = field(
        default=__a , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
    _A : bool = field(default=__a , metadata={'''help''': '''Whether to use Adafactor.'''} )
    _A : Optional[float] = field(
        default=__a , metadata={'''help''': '''Encoder layer dropout probability. Goes into model.config.'''} )
    _A : Optional[float] = field(
        default=__a , metadata={'''help''': '''Decoder layer dropout probability. Goes into model.config.'''} )
    _A : Optional[float] = field(default=__a , metadata={'''help''': '''Dropout probability. Goes into model.config.'''} )
    _A : Optional[float] = field(
        default=__a , metadata={'''help''': '''Attention dropout probability. Goes into model.config.'''} )
    _A : Optional[str] = field(
        default='''linear''' , metadata={'''help''': f'Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'} , )
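# A tiny self-contained sketch of the dataclass/field(metadata={"help": ...})
# pattern used above; the field names here are illustrative, not from the source:
from dataclasses import dataclass, field, fields
from typing import Optional


@dataclass
class MiniArguments:
    label_smoothing: Optional[float] = field(default=0.0, metadata={"help": "The label smoothing epsilon to apply."})
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})


args = MiniArguments(sortish_sampler=True)
for f in fields(args):
    print(f.name, f.metadata["help"])  # HfArgumentParser turns this metadata into --help text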
def snake_case_ ( lowerCAmelCase_ : int = 200 ):
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (lowerCAmelCase_ + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin , lowerCAmelCase_ + 1 , 1 ):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[lowerCAmelCase_]


if __name__ == "__main__":
    assert snake_case_(2_00) == 7_36_82
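# To see the unbounded-knapsack recurrence at work, a hand-checkable trace for
# 5 pence (only the coins up to 5 contribute):
#   ways[0..5] after coin 1 -> [1, 1, 1, 1, 1, 1]
#   ways[0..5] after coin 2 -> [1, 1, 2, 2, 3, 3]
#   ways[0..5] after coin 5 -> [1, 1, 2, 2, 3, 4]
assert snake_case_(5) == 4  # 1+1+1+1+1, 1+1+1+2, 1+2+2, 5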
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : int = logging.get_logger(__name__) lowerCamelCase : Optional[int] = { '''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''', } class lowerCAmelCase ( __a ): '''simple docstring''' _A : str = '''switch_transformers''' _A : Any = ['''past_key_values'''] _A : Any = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self : Union[str, Any] , __a : str=32128 , __a : Dict=768 , __a : str=64 , __a : List[str]=2048 , __a : List[Any]=64 , __a : Union[str, Any]=12 , __a : Optional[Any]=3 , __a : str=12 , __a : int=3 , __a : Tuple=12 , __a : Any=8 , __a : List[str]=False , __a : Union[str, Any]=0.01 , __a : List[Any]="float32" , __a : Tuple=False , __a : List[Any]=32 , __a : int=128 , __a : List[Any]=0.1 , __a : str=1E-6 , __a : Optional[Any]=0.001 , __a : Optional[int]=0.001 , __a : str=1.0 , __a : List[Any]="relu" , __a : Dict=True , __a : Optional[Any]=False , __a : Optional[Any]=True , __a : Optional[Any]=0 , __a : Union[str, Any]=1 , **__a : Optional[int] , ) -> List[Any]: """simple docstring""" __lowercase : Dict = vocab_size __lowercase : List[Any] = d_model __lowercase : Dict = d_kv __lowercase : List[Any] = d_ff __lowercase : Dict = num_sparse_encoder_layers __lowercase : int = num_layers __lowercase : Optional[Any] = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry __lowercase : Union[str, Any] = num_sparse_decoder_layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_encoder_layers > 0: __lowercase : str = self.num_layers // self.num_sparse_encoder_layers else: __lowercase : str = self.num_layers # HACK: this will create 0 sparse layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_decoder_layers > 0: __lowercase : int = self.num_decoder_layers // self.num_sparse_decoder_layers else: __lowercase : str = self.num_decoder_layers # HACK: this will create 0 sparse layers __lowercase : Optional[Any] = num_heads __lowercase : Any = num_experts __lowercase : Optional[int] = expert_capacity __lowercase : Optional[int] = router_bias __lowercase : List[Any] = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(F"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" ) __lowercase : int = router_dtype __lowercase : List[str] = router_ignore_padding_tokens __lowercase : str = relative_attention_num_buckets __lowercase : List[Any] = relative_attention_max_distance __lowercase : Any = dropout_rate __lowercase : Dict = layer_norm_epsilon __lowercase : Optional[Any] = initializer_factor __lowercase : int = feed_forward_proj __lowercase : Tuple = use_cache __lowercase : Optional[Any] = add_router_probs __lowercase : Optional[Any] = router_z_loss_coef __lowercase : List[str] = router_aux_loss_coef __lowercase : Union[str, Any] = self.feed_forward_proj.split("""-""" ) __lowercase : List[str] = act_info[-1] __lowercase : Optional[int] = act_info[0] == """gated""" if len(__a ) > 1 and act_info[0] != "gated" or len(__a ) > 2: raise ValueError( F"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer." """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. 
""" """'gated-gelu' or 'relu'""" ) # for backwards compatibility if feed_forward_proj == "gated-gelu": __lowercase : List[Any] = """gelu_new""" super().__init__( pad_token_id=__a , eos_token_id=__a , is_encoder_decoder=__a , **__a , )
import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class lowerCAmelCase : '''simple docstring''' def __init__( self : Optional[Any] , __a : Dict , __a : List[str]=None , __a : Optional[Any]=None , __a : Union[str, Any]=None , __a : int="resnet50" , __a : List[str]=3 , __a : Tuple=32 , __a : Dict=3 , __a : List[str]=True , __a : Union[str, Any]=True , ) -> Any: """simple docstring""" __lowercase : Optional[int] = parent __lowercase : List[str] = out_indices if out_indices is not None else [4] __lowercase : Optional[int] = stage_names __lowercase : Any = out_features __lowercase : Optional[Any] = backbone __lowercase : Optional[Any] = batch_size __lowercase : Union[str, Any] = image_size __lowercase : List[str] = num_channels __lowercase : str = use_pretrained_backbone __lowercase : str = is_training def lowerCAmelCase ( self : Dict ) -> Tuple: """simple docstring""" __lowercase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase : str = self.get_config() return config, pixel_values def lowerCAmelCase ( self : int ) -> str: """simple docstring""" return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def lowerCAmelCase ( self : Optional[int] , __a : Dict , __a : Any ) -> Dict: """simple docstring""" __lowercase : Dict = TimmBackbone(config=__a ) model.to(__a ) model.eval() with torch.no_grad(): __lowercase : Optional[Any] = model(__a ) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , ) def lowerCAmelCase ( self : Any ) -> int: """simple docstring""" __lowercase : Union[str, Any] = self.prepare_config_and_inputs() __lowercase , __lowercase : str = config_and_inputs __lowercase : List[str] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch @require_timm class lowerCAmelCase ( __a , __a , __a , unittest.TestCase ): '''simple docstring''' _A : List[Any] = (TimmBackbone,) if is_torch_available() else () _A : Dict = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {} _A : List[Any] = False _A : List[str] = False _A : Any = False _A : Optional[Any] = False def lowerCAmelCase ( self : Dict ) -> Optional[Any]: """simple docstring""" __lowercase : str = TimmBackboneModelTester(self ) __lowercase : Any = ConfigTester(self , config_class=__a , has_text_modality=__a ) def lowerCAmelCase ( self : Any ) -> str: """simple docstring""" self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() 
self.config_tester.check_config_arguments_init() def lowerCAmelCase ( self : str ) -> Tuple: """simple docstring""" __lowercase : Tuple = """resnet18""" __lowercase : Optional[int] = """microsoft/resnet-18""" __lowercase : Union[str, Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a ) __lowercase : Dict = AutoBackbone.from_pretrained(__a ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,) ) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] ) __lowercase : Union[str, Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a , out_indices=[1, 2, 3] ) __lowercase : Optional[Any] = AutoBackbone.from_pretrained(__a , out_indices=[1, 2, 3] ) self.assertEqual(timm_model.out_indices , transformers_model.out_indices ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) @unittest.skip("""TimmBackbone doesn't support feed forward chunking""" ) def lowerCAmelCase ( self : List[Any] ) -> Any: """simple docstring""" pass @unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" ) def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" pass @unittest.skip("""TimmBackbone initialization is managed on the timm side""" ) def lowerCAmelCase ( self : List[Any] ) -> str: """simple docstring""" pass @unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" ) def lowerCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" pass @unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" ) def lowerCAmelCase ( self : Tuple ) -> Tuple: """simple docstring""" pass @unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" ) def lowerCAmelCase ( self : Dict ) -> Optional[Any]: """simple docstring""" pass @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" ) def lowerCAmelCase ( self : Union[str, Any] ) -> int: """simple docstring""" pass @unittest.skip("""model weights aren't tied in TimmBackbone.""" ) def lowerCAmelCase ( self : Union[str, Any] ) -> str: """simple docstring""" pass @unittest.skip("""model weights aren't tied in TimmBackbone.""" ) def lowerCAmelCase ( self : Dict ) -> int: """simple docstring""" pass @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" ) def lowerCAmelCase ( self : List[str] ) -> List[Any]: """simple docstring""" pass @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" ) def lowerCAmelCase ( self : List[Any] ) -> Tuple: """simple docstring""" pass @unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" ) def lowerCAmelCase ( self : Dict ) -> Any: """simple docstring""" pass @unittest.skip("""TimmBackbone doesn't support output_attentions.""" ) def lowerCAmelCase ( self : str ) -> List[Any]: """simple docstring""" pass @unittest.skip("""Safetensors is not supported by timm.""" ) def lowerCAmelCase ( self : Any ) -> List[Any]: 
"""simple docstring""" pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCAmelCase ( self : List[str] ) -> List[str]: """simple docstring""" pass def lowerCAmelCase ( self : Any ) -> List[str]: """simple docstring""" __lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase : Optional[Any] = model_class(__a ) __lowercase : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase : List[str] = [*signature.parameters.keys()] __lowercase : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __a ) def lowerCAmelCase ( self : Optional[Any] ) -> int: """simple docstring""" __lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common() __lowercase : Optional[Any] = True __lowercase : Union[str, Any] = self.has_attentions # no need to test all models as different heads yield the same functionality __lowercase : Union[str, Any] = self.all_model_classes[0] __lowercase : List[Any] = model_class(__a ) model.to(__a ) __lowercase : Optional[Any] = self._prepare_for_class(__a , __a ) __lowercase : Union[str, Any] = model(**__a ) __lowercase : Optional[int] = outputs[0][-1] # Encoder-/Decoder-only models __lowercase : Any = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: __lowercase : Optional[int] = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=__a ) self.assertIsNotNone(hidden_states.grad ) if self.has_attentions: self.assertIsNotNone(attentions.grad ) def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" __lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase : List[str] = model_class(__a ) model.to(__a ) model.eval() __lowercase : int = model(**__a ) self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) ) self.assertEqual(len(model.channels ) , len(config.out_indices ) ) # Check output of last stage is taken if out_features=None, out_indices=None __lowercase : Any = copy.deepcopy(__a ) __lowercase : Dict = None __lowercase : Tuple = model_class(__a ) model.to(__a ) model.eval() __lowercase : Optional[int] = model(**__a ) self.assertEqual(len(result.feature_maps ) , 1 ) self.assertEqual(len(model.channels ) , 1 ) # Check backbone can be initialized with fresh weights __lowercase : List[str] = copy.deepcopy(__a ) __lowercase : Optional[Any] = False __lowercase : str = model_class(__a ) model.to(__a ) model.eval() __lowercase : List[Any] = model(**__a )
import numpy as np

from transformers import Pipeline


def softmax(outputs):
    maxes = np.max(outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )


class PairClassificationPipeline(Pipeline):

    def _sanitize_parameters(self , **kwargs ):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self , text , second_text=None ):
        return self.tokenizer(text , text_pair=second_text , return_tensors=self.framework )

    def _forward(self , model_inputs ):
        return self.model(**model_inputs )

    def postprocess(self , model_outputs ):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits )
        best_class = np.argmax(probabilities )
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        return {"label": label, "score": score, "logits": logits.tolist()}
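# Usage sketch for the pipeline above; the checkpoint name is a placeholder for
# any sequence-pair classification model:
from transformers import AutoModelForSequenceClassification, AutoTokenizer

checkpoint = "sgugger/finetuned-bert-mrpc"  # placeholder pair-classification checkpoint
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)
tokenizer = AutoTokenizer.from_pretrained(checkpoint)

pipe = PairClassificationPipeline(model=model, tokenizer=tokenizer)
print(pipe("I like you", second_text="I love you"))  # {"label": ..., "score": ..., "logits": [...]}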
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowerCamelCase : Optional[int] = logging.get_logger(__name__) lowerCamelCase : str = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''', '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''', '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''', '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''', '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''', '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''', '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''', '''self_attn.rotary_emb''': '''encoder.embed_positions''', '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''', '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''', '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''', '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''', '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''', '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''', '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''', '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''', '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''', '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''', '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''', '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } lowerCamelCase : Optional[Any] = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def snake_case_ ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : int ): for attribute in key.split(""".""" ): __lowercase : List[str] = getattr(lowerCAmelCase_ , lowerCAmelCase_ ) if weight_type is not None: __lowercase : Union[str, Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ ).shape else: __lowercase : Dict = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": __lowercase : Dict = value elif weight_type == "weight_g": __lowercase : Union[str, Any] = value elif weight_type == "weight_v": __lowercase : List[Any] = value elif weight_type == "bias": __lowercase : int = value elif weight_type == "running_mean": __lowercase : List[Any] = value elif weight_type == "running_var": __lowercase : int = value elif weight_type == "num_batches_tracked": __lowercase : int = value elif weight_type == "inv_freq": __lowercase : Optional[Any] = value else: __lowercase : Any = value logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def snake_case_ ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] ): __lowercase : str = [] __lowercase : Any = fairseq_model.state_dict() __lowercase : List[str] = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): __lowercase : Optional[Any] = False if "conv_layers" in name: load_conv_layer( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , hf_model.config.feat_extract_norm == """group""" , ) __lowercase : List[str] = True else: for key, mapped_key in MAPPING.items(): __lowercase : Any = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: __lowercase : Tuple = True if "*" in mapped_key: __lowercase : List[Any] = name.split(lowerCAmelCase_ )[0].split(""".""" )[-2] __lowercase : Any = mapped_key.replace("""*""" , lowerCAmelCase_ ) if "pos_bias_u" in name: __lowercase : Any = None elif "pos_bias_v" in name: __lowercase : Tuple = None elif "weight_g" in name: __lowercase : Union[str, Any] = """weight_g""" elif "weight_v" in name: __lowercase : Dict = """weight_v""" elif "bias" in name: __lowercase : Union[str, Any] = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowercase : str = """weight""" elif "running_mean" in name: __lowercase : str = """running_mean""" elif "inv_freq" in name: __lowercase : List[Any] = """inv_freq""" elif "running_var" in name: __lowercase : Any = """running_var""" elif "num_batches_tracked" in name: __lowercase : Any = """num_batches_tracked""" else: __lowercase : Optional[int] = None set_recursively(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) continue if not is_used: unused_weights.append(lowerCAmelCase_ ) logger.warning(F"Unused weights: {unused_weights}" ) def snake_case_ ( lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any] ): __lowercase : List[Any] = full_name.split("""conv_layers.""" )[-1] __lowercase : int = name.split(""".""" ) __lowercase : Optional[Any] = int(items[0] ) __lowercase : List[str] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) __lowercase : Union[str, Any] = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." 
) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) __lowercase : List[str] = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." ) __lowercase : Union[str, Any] = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." ) __lowercase : Dict = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) else: unused_weights.append(lowerCAmelCase_ ) @torch.no_grad() def snake_case_ ( lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : Dict=True ): if config_path is not None: __lowercase : List[Any] = WavaVecaConformerConfig.from_pretrained(lowerCAmelCase_ , hidden_act="""swish""" ) else: __lowercase : List[Any] = WavaVecaConformerConfig() if "rope" in checkpoint_path: __lowercase : Tuple = """rotary""" if is_finetuned: if dict_path: __lowercase : Any = Dictionary.load(lowerCAmelCase_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __lowercase : List[Any] = target_dict.pad_index __lowercase : Optional[int] = target_dict.bos_index __lowercase : List[Any] = target_dict.eos_index __lowercase : List[str] = len(target_dict.symbols ) __lowercase : Union[str, Any] = os.path.join(lowerCAmelCase_ , """vocab.json""" ) if not os.path.isdir(lowerCAmelCase_ ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowerCAmelCase_ ) ) return os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) __lowercase : Tuple = target_dict.indices # fairseq has the <pad> and <s> switched __lowercase : int = 0 __lowercase : Any = 1 with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(lowerCAmelCase_ , lowerCAmelCase_ ) __lowercase : Dict = WavaVecaCTCTokenizer( lowerCAmelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowerCAmelCase_ , ) __lowercase : List[Any] = True if config.feat_extract_norm == """layer""" else False __lowercase : Optional[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , ) __lowercase : Optional[int] = WavaVecaProcessor(feature_extractor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ ) processor.save_pretrained(lowerCAmelCase_ ) __lowercase : Union[str, Any] = WavaVecaConformerForCTC(lowerCAmelCase_ ) else: __lowercase : Optional[Any] = WavaVecaConformerForPreTraining(lowerCAmelCase_ ) if is_finetuned: __lowercase , __lowercase , 
__lowercase : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: __lowercase : List[Any] = argparse.Namespace(task="""audio_pretraining""" ) __lowercase : Optional[Any] = fairseq.tasks.setup_task(lowerCAmelCase_ ) __lowercase , __lowercase , __lowercase : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCAmelCase_ ) __lowercase : Dict = model[0].eval() recursively_load_weights(lowerCAmelCase_ , lowerCAmelCase_ , not is_finetuned ) hf_wavavec.save_pretrained(lowerCAmelCase_ ) if __name__ == "__main__": lowerCamelCase : int = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) lowerCamelCase : Any = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase ( self : Optional[int] , __a : List[Any] ) -> Tuple: """simple docstring""" __lowercase : Tuple = 3 __lowercase : Optional[Any] = 250 __lowercase : List[str] = ids_tensor((batch_size, length) , __a ) __lowercase : List[Any] = torch.ones((batch_size, length) , device=__a , dtype=torch.float ) / length return input_ids, scores def lowerCAmelCase ( self : Dict ) -> Any: """simple docstring""" __lowercase , __lowercase : str = self._get_tensors(5 ) __lowercase : Any = StoppingCriteriaList( [ MaxLengthCriteria(max_length=10 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(__a , __a ) ) __lowercase , __lowercase : List[str] = self._get_tensors(9 ) self.assertFalse(criteria(__a , __a ) ) __lowercase , __lowercase : Optional[Any] = self._get_tensors(10 ) self.assertTrue(criteria(__a , __a ) ) def lowerCAmelCase ( self : str ) -> Optional[Any]: """simple docstring""" __lowercase : str = MaxLengthCriteria(max_length=10 ) __lowercase , __lowercase : List[str] = self._get_tensors(5 ) self.assertFalse(criteria(__a , __a ) ) __lowercase , __lowercase : Tuple = self._get_tensors(9 ) self.assertFalse(criteria(__a , __a ) ) __lowercase , __lowercase : List[str] = self._get_tensors(10 ) self.assertTrue(criteria(__a , __a ) ) def lowerCAmelCase ( self : Dict ) -> List[Any]: """simple docstring""" __lowercase : List[Any] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) __lowercase , __lowercase : Tuple = self._get_tensors(5 ) self.assertFalse(criteria(__a , __a ) ) __lowercase , __lowercase : Any = self._get_tensors(9 ) self.assertFalse(criteria(__a , __a ) ) __lowercase , __lowercase : Any = self._get_tensors(10 ) self.assertTrue(criteria(__a , __a ) ) __lowercase : Union[str, Any] = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 10 ) def lowerCAmelCase ( self : Tuple ) -> Union[str, Any]: """simple docstring""" __lowercase , __lowercase : Dict = self._get_tensors(5 ) __lowercase : List[Any] = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(__a , __a ) ) __lowercase : Optional[Any] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(__a , __a ) ) def lowerCAmelCase ( self : int ) -> Tuple: """simple docstring""" validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 ) with self.assertWarns(__a ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 ) __lowercase : List[Any] = validate_stopping_criteria(StoppingCriteriaList() , 11 ) self.assertEqual(len(__a ) , 1 )
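# Outside the unit test, the same criteria objects plug straight into
# `generate`; a minimal sketch (the checkpoint name is a placeholder):
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import MaxLengthCriteria, StoppingCriteriaList

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # placeholder checkpoint
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("Hello", return_tensors="pt")
stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=10)])
output_ids = model.generate(**inputs, stopping_criteria=stopping_criteria, do_sample=False)
print(tokenizer.decode(output_ids[0]))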
def snake_case_ ( string_a : str , string_b : str ):
    if len(string_a ) != len(string_b ):
        raise ValueError("""String lengths must match!""" )
    count = 0
    for char_a, char_b in zip(string_a , string_b ):
        if char_a != char_b:
            count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
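# Quick checks of the Hamming-distance helper above:
assert snake_case_("python", "pythun") == 1
assert snake_case_("karolin", "kathrin") == 3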
import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def snake_case_ ( lowerCAmelCase_ : Tuple ): if isinstance(lowerCAmelCase_ , collections.abc.Iterable ): return x return (x, x) @require_flax class lowerCAmelCase : '''simple docstring''' def lowerCAmelCase ( self : Any , __a : Any , __a : List[Any] ) -> Optional[Any]: """simple docstring""" pass def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" pass def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" pass def lowerCAmelCase ( self : Tuple , __a : np.ndarray , __a : np.ndarray , __a : float ) -> List[Any]: """simple docstring""" __lowercase : List[str] = np.abs((a - b) ).max() self.assertLessEqual(__a , __a , F"Difference between torch and flax is {diff} (>= {tol})." ) def lowerCAmelCase ( self : Tuple , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[Any] , __a : Optional[Any]=None , **__a : Tuple ) -> Optional[Any]: """simple docstring""" __lowercase : str = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a ) __lowercase : str = FlaxVisionTextDualEncoderModel(__a ) __lowercase : Optional[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) ) def lowerCAmelCase ( self : Optional[int] , __a : Optional[int] , __a : Dict , __a : Dict , __a : List[str] , __a : Optional[Any]=None , **__a : str ) -> str: """simple docstring""" __lowercase , __lowercase : List[str] = self.get_vision_text_model(__a , __a ) __lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model} __lowercase : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a ) __lowercase : Any = model(input_ids=__a , pixel_values=__a , attention_mask=__a ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] , __a : Union[str, Any] , __a : Union[str, Any] , __a : Dict , __a : int=None , **__a : int ) -> List[Any]: """simple docstring""" __lowercase , __lowercase : Tuple = self.get_vision_text_model(__a , __a ) __lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model} __lowercase : List[str] = 
FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a ) __lowercase : List[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a ) __lowercase : int = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__a ) __lowercase : int = FlaxVisionTextDualEncoderModel.from_pretrained(__a ) __lowercase : Tuple = model(input_ids=__a , pixel_values=__a , attention_mask=__a ) __lowercase : int = after_output[0] __lowercase : int = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__a , 1E-3 ) def lowerCAmelCase ( self : List[Any] , __a : Any , __a : Tuple , __a : Optional[int] , __a : str , __a : Optional[Any]=None , **__a : Optional[Any] ) -> List[Any]: """simple docstring""" __lowercase , __lowercase : str = self.get_vision_text_model(__a , __a ) __lowercase : Optional[Any] = {"""vision_model""": vision_model, """text_model""": text_model} __lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a ) __lowercase : Union[str, Any] = model( input_ids=__a , pixel_values=__a , attention_mask=__a , output_attentions=__a ) __lowercase : Optional[int] = output.vision_model_output.attentions self.assertEqual(len(__a ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) __lowercase : Optional[int] = to_atuple(vision_model.config.image_size ) __lowercase : List[str] = to_atuple(vision_model.config.patch_size ) __lowercase : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __lowercase : int = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __lowercase : Dict = output.text_model_output.attentions self.assertEqual(len(__a ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def lowerCAmelCase ( self : Optional[int] , __a : List[str] , __a : List[Any] , __a : Optional[Any] ) -> Optional[int]: """simple docstring""" pt_model.to(__a ) pt_model.eval() # prepare inputs __lowercase : Union[str, Any] = inputs_dict __lowercase : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): __lowercase : Union[str, Any] = pt_model(**__a ).to_tuple() __lowercase : Tuple = fx_model(**__a ).to_tuple() self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(__a ) __lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained(__a , from_pt=__a ) __lowercase : Dict = fx_model_loaded(**__a ).to_tuple() self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(__a ) __lowercase : str = VisionTextDualEncoderModel.from_pretrained(__a , from_flax=__a ) pt_model_loaded.to(__a ) pt_model_loaded.eval() with torch.no_grad(): __lowercase : List[Any] = pt_model_loaded(**__a ).to_tuple() self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output_loaded in 
zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(__a , pt_output_loaded.numpy() , 4E-2 ) def lowerCAmelCase ( self : Optional[int] , __a : List[Any] , __a : int , __a : Optional[int] ) -> Optional[int]: """simple docstring""" __lowercase : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a ) __lowercase : str = VisionTextDualEncoderModel(__a ) __lowercase : Union[str, Any] = FlaxVisionTextDualEncoderModel(__a ) __lowercase : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __a ) __lowercase : Any = fx_state self.check_pt_flax_equivalence(__a , __a , __a ) def lowerCAmelCase ( self : Any , __a : Any , __a : Dict , __a : Tuple ) -> str: """simple docstring""" __lowercase : int = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a ) __lowercase : Union[str, Any] = VisionTextDualEncoderModel(__a ) __lowercase : Dict = FlaxVisionTextDualEncoderModel(__a ) __lowercase : Tuple = load_flax_weights_in_pytorch_model(__a , fx_model.params ) self.check_pt_flax_equivalence(__a , __a , __a ) def lowerCAmelCase ( self : str ) -> Optional[Any]: """simple docstring""" __lowercase : Optional[Any] = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**__a ) def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" __lowercase : int = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**__a ) def lowerCAmelCase ( self : List[Any] ) -> Dict: """simple docstring""" __lowercase : List[str] = self.prepare_config_and_inputs() self.check_save_load(**__a ) def lowerCAmelCase ( self : Any ) -> Dict: """simple docstring""" __lowercase : str = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**__a ) @is_pt_flax_cross_test def lowerCAmelCase ( self : List[str] ) -> Tuple: """simple docstring""" __lowercase : Optional[Any] = self.prepare_config_and_inputs() __lowercase : Optional[int] = config_inputs_dict.pop("""vision_config""" ) __lowercase : Optional[int] = config_inputs_dict.pop("""text_config""" ) __lowercase : Dict = config_inputs_dict self.check_equivalence_pt_to_flax(__a , __a , __a ) self.check_equivalence_flax_to_pt(__a , __a , __a ) @slow def lowerCAmelCase ( self : Union[str, Any] ) -> str: """simple docstring""" __lowercase , __lowercase : List[Any] = self.get_pretrained_model_and_inputs() __lowercase : Dict = model_a(**__a ) __lowercase : Any = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(__a ) __lowercase : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(__a ) __lowercase : Optional[int] = model_a(**__a ) __lowercase : Tuple = after_outputs[0] __lowercase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__a , 1E-5 ) @require_flax class lowerCAmelCase ( __a , unittest.TestCase ): '''simple docstring''' def lowerCAmelCase ( self : Dict ) -> Dict: """simple docstring""" __lowercase : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=__a , text_from_pt=__a , ) __lowercase : int = 13 __lowercase : Union[str, Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) __lowercase : Dict = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) __lowercase : Tuple = random_attention_mask([batch_size, 4] ) __lowercase : str = 
{"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def lowerCAmelCase ( self : Optional[Any] , __a : Union[str, Any] , __a : int ) -> Dict: """simple docstring""" __lowercase : int = FlaxViTModel(__a ) __lowercase : List[Any] = FlaxBertModel(__a ) return vision_model, text_model def lowerCAmelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" __lowercase : Tuple = FlaxViTModelTester(self ) __lowercase : str = FlaxBertModelTester(self ) __lowercase : List[str] = vit_model_tester.prepare_config_and_inputs() __lowercase : Union[str, Any] = bert_model_tester.prepare_config_and_inputs() __lowercase , __lowercase : Optional[int] = vision_config_and_inputs __lowercase , __lowercase , __lowercase , __lowercase : Any = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class lowerCAmelCase ( __a , unittest.TestCase ): '''simple docstring''' def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __lowercase : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=__a , text_from_pt=__a , ) __lowercase : Tuple = 13 __lowercase : Optional[Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) __lowercase : Tuple = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) __lowercase : List[Any] = random_attention_mask([batch_size, 4] ) __lowercase : int = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def lowerCAmelCase ( self : str , __a : str , __a : Union[str, Any] ) -> Any: """simple docstring""" __lowercase : Dict = FlaxCLIPVisionModel(__a ) __lowercase : Optional[Any] = FlaxBertModel(__a ) return vision_model, text_model def lowerCAmelCase ( self : List[Any] ) -> List[str]: """simple docstring""" __lowercase : List[Any] = FlaxCLIPVisionModelTester(self ) __lowercase : Optional[Any] = FlaxBertModelTester(self ) __lowercase : Any = clip_model_tester.prepare_config_and_inputs() __lowercase : Optional[Any] = bert_model_tester.prepare_config_and_inputs() __lowercase , __lowercase : Dict = vision_config_and_inputs __lowercase , __lowercase , __lowercase , __lowercase : Optional[int] = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" __lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 ) __lowercase : int = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" ) __lowercase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) __lowercase : Tuple = processor( text=["""una foto di un gatto""", """una foto di un cane"""] , images=__a , 
padding=__a , return_tensors="""np""" ) __lowercase : Optional[int] = model(**__a ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) __lowercase : Optional[Any] = np.array([[1.2284727, 0.3104122]] ) self.assertTrue(np.allclose(outputs.logits_per_image , __a , atol=1E-3 ) )
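# Hedged construction sketch for the dual-encoder pattern the tests above
# exercise; it reuses the tiny checkpoints already named in this record (which
# ship PyTorch weights, hence the from_pt flags) and assumes flax is installed.
from transformers import FlaxVisionTextDualEncoderModel

model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
    "hf-internal-testing/tiny-random-vit",
    "hf-internal-testing/tiny-bert",
    vision_from_pt=True,
    text_from_pt=True,
)
print(model.config.projection_dim)  # size of the shared embedding space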
306
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
306
from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
306
1
def solution(pence: int = 200) -> int:
    """Count the ways `pence` can be made from standard UK coin denominations."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
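# Hedged sanity checks for the DP above; the value 4 is worked by hand
# (1+1+1+1+1, 1+1+1+2, 1+2+2 and 5 are the four ways to make 5 pence).
assert solution(5) == 4
assert solution(0) == 1  # the empty selection is the one way to make 0 pence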
306
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type

from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm

NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
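# Minimal sketch of the datasketch machinery the pipeline above builds on; the
# two snippets are illustrative, while num_perm=256 and threshold=0.85 mirror
# the defaults used above.
from datasketch import MinHash, MinHashLSH


def sketch(text: str) -> MinHash:
    m = MinHash(num_perm=256)
    for token in set(text.split()):
        m.update(token.encode())
    return m


doc_a = sketch("def add(a, b): return a + b")
doc_b = sketch("def add(x, y): return x + y")
lsh = MinHashLSH(threshold=0.85, num_perm=256)
lsh.insert("doc_a", doc_a)
print(lsh.query(doc_b))      # keys already indexed that look near-duplicate
print(doc_a.jaccard(doc_b))  # estimated Jaccard similarity of the token sets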
306
1
import math import sys import cva import numpy as np def snake_case_ ( lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float ): # For applying gaussian function for each element in matrix. __lowercase : Dict = math.sqrt(lowerCAmelCase_ ) __lowercase : Dict = 1 / (sigma * math.sqrt(2 * math.pi )) return cons * np.exp(-((img / sigma) ** 2) * 0.5 ) def snake_case_ ( lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int ): __lowercase : Any = kernel_size // 2 return img[x - half : x + half + 1, y - half : y + half + 1] def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : float ): # Creates a gaussian kernel of given dimension. __lowercase : Optional[Any] = np.zeros((kernel_size, kernel_size) ) for i in range(0 , lowerCAmelCase_ ): for j in range(0 , lowerCAmelCase_ ): __lowercase : List[str] = math.sqrt( abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 ) return vec_gaussian(lowerCAmelCase_ , lowerCAmelCase_ ) def snake_case_ ( lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : int , ): __lowercase : Dict = np.zeros(img.shape ) __lowercase : Optional[int] = get_gauss_kernel(lowerCAmelCase_ , lowerCAmelCase_ ) __lowercase , __lowercase : str = img.shape for i in range(kernel_size // 2 , size_x - kernel_size // 2 ): for j in range(kernel_size // 2 , size_y - kernel_size // 2 ): __lowercase : Tuple = get_slice(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) __lowercase : Union[str, Any] = img_s - img_s[kernel_size // 2, kernel_size // 2] __lowercase : Dict = vec_gaussian(lowerCAmelCase_ , lowerCAmelCase_ ) __lowercase : str = np.multiply(lowerCAmelCase_ , lowerCAmelCase_ ) __lowercase : str = np.multiply(lowerCAmelCase_ , lowerCAmelCase_ ) __lowercase : Dict = np.sum(lowerCAmelCase_ ) / np.sum(lowerCAmelCase_ ) __lowercase : Union[str, Any] = val return imga def snake_case_ ( lowerCAmelCase_ : list ): __lowercase : str = args[1] if args[1:] else """../image_data/lena.jpg""" __lowercase : Dict = float(args[2] ) if args[2:] else 1.0 __lowercase : Union[str, Any] = float(args[3] ) if args[3:] else 1.0 if args[4:]: __lowercase : Optional[Any] = int(args[4] ) __lowercase : List[str] = kernel_size + abs(kernel_size % 2 - 1 ) else: __lowercase : int = 5 return filename, spatial_variance, intensity_variance, kernel_size if __name__ == "__main__": lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,lowerCamelCase : Optional[Any] = parse_args(sys.argv) lowerCamelCase : Optional[int] = cva.imread(filename, 0) cva.imshow('''input image''', img) lowerCamelCase : Optional[int] = img / 2_55 lowerCamelCase : str = out.astype('''float32''') lowerCamelCase : Optional[Any] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size) lowerCamelCase : Any = out * 2_55 lowerCamelCase : int = np.uinta(out) cva.imshow('''output image''', out) cva.waitKey(0) cva.destroyAllWindows()
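# Self-contained sketch of the weighting idea behind the bilateral filter above:
# each pixel becomes a window average weighted by a spatial Gaussian times an
# intensity Gaussian. The 5x5 window and unit variances are illustrative.
import numpy as np


def gaussian(x: np.ndarray, sigma: float) -> np.ndarray:
    return np.exp(-((x / sigma) ** 2) * 0.5) / (sigma * np.sqrt(2 * np.pi))


rng = np.random.default_rng(0)
window = rng.random((5, 5))  # a 5x5 patch of an image scaled to [0, 1]
center = window[2, 2]
ii, jj = np.mgrid[-2:3, -2:3]
spatial_w = gaussian(np.sqrt(ii**2 + jj**2), 1.0)  # nearer pixels weigh more
intensity_w = gaussian(window - center, 1.0)       # similar pixels weigh more
weights = spatial_w * intensity_w
filtered_center = np.sum(window * weights) / np.sum(weights)
print(filtered_center)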
306
from ...processing_utils import ProcessorMixin class lowerCAmelCase ( __a ): '''simple docstring''' _A : List[str] = ['''image_processor''', '''feature_extractor'''] _A : List[Any] = '''TvltImageProcessor''' _A : Optional[int] = '''TvltFeatureExtractor''' def __init__( self : str , __a : List[Any] , __a : Tuple ) -> Optional[Any]: """simple docstring""" super().__init__(image_processor=__a , feature_extractor=__a ) __lowercase : Union[str, Any] = image_processor __lowercase : Tuple = feature_extractor def __call__( self : Tuple , __a : Optional[int]=None , __a : Dict=None , __a : Union[str, Any]=None , __a : Tuple=None , __a : Optional[Any]=False , __a : List[Any]=False , *__a : List[str] , **__a : List[Any] , ) -> Dict: """simple docstring""" if images is None and audio is None: raise ValueError("""You need to specify either an `images` or `audio` input to process.""" ) __lowercase : Tuple = None if images is not None: __lowercase : Any = self.image_processor(__a , mask_pixel=__a , *__a , **__a ) if images_mixed is not None: __lowercase : Union[str, Any] = self.image_processor(__a , is_mixed=__a , *__a , **__a ) if audio is not None: __lowercase : Optional[Any] = self.feature_extractor( __a , *__a , sampling_rate=__a , mask_audio=__a , **__a ) __lowercase : Tuple = {} if audio is not None: output_dict.update(__a ) if images is not None: output_dict.update(__a ) if images_mixed_dict is not None: output_dict.update(__a ) return output_dict @property def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" __lowercase : int = self.image_processor.model_input_names __lowercase : Union[str, Any] = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
306
1
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import OwlViTImageProcessor, OwlViTProcessor @require_vision class lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase ( self : Optional[Any] ) -> List[str]: """simple docstring""" __lowercase : Any = tempfile.mkdtemp() # fmt: off __lowercase : Union[str, Any] = ["""""", """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on __lowercase : Optional[int] = dict(zip(__a , range(len(__a ) ) ) ) __lowercase : Optional[Any] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""] __lowercase : Optional[Any] = {"""unk_token""": """<unk>"""} __lowercase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) __lowercase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(__a ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(__a ) ) __lowercase : Any = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.48145466, 0.4578275, 0.40821073], """image_std""": [0.26862954, 0.26130258, 0.27577711], } __lowercase : List[str] = os.path.join(self.tmpdirname , __a ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(__a , __a ) def lowerCAmelCase ( self : Optional[Any] , **__a : Union[str, Any] ) -> List[str]: """simple docstring""" return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="""!""" , **__a ) def lowerCAmelCase ( self : Union[str, Any] , **__a : str ) -> Optional[Any]: """simple docstring""" return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="""!""" , **__a ) def lowerCAmelCase ( self : int , **__a : List[Any] ) -> Optional[Any]: """simple docstring""" return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__a ) def lowerCAmelCase ( self : List[Any] ) -> List[str]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def lowerCAmelCase ( self : str ) -> List[Any]: """simple docstring""" __lowercase : Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] __lowercase : List[Any] = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" __lowercase : Any = self.get_tokenizer() __lowercase : Union[str, Any] = self.get_rust_tokenizer() __lowercase : List[str] = self.get_image_processor() __lowercase : str = OwlViTProcessor(tokenizer=__a , image_processor=__a ) processor_slow.save_pretrained(self.tmpdirname ) __lowercase : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__a ) __lowercase : Optional[Any] = OwlViTProcessor(tokenizer=__a , image_processor=__a ) 
processor_fast.save_pretrained(self.tmpdirname ) __lowercase : Optional[int] = OwlViTProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __a ) self.assertIsInstance(processor_fast.tokenizer , __a ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __a ) self.assertIsInstance(processor_fast.image_processor , __a ) def lowerCAmelCase ( self : Union[str, Any] ) -> List[str]: """simple docstring""" __lowercase : Union[str, Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowercase : List[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) __lowercase : Optional[Any] = self.get_image_processor(do_normalize=__a ) __lowercase : Optional[int] = OwlViTProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__a ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __a ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __a ) def lowerCAmelCase ( self : Tuple ) -> Any: """simple docstring""" __lowercase : List[Any] = self.get_image_processor() __lowercase : str = self.get_tokenizer() __lowercase : Any = OwlViTProcessor(tokenizer=__a , image_processor=__a ) __lowercase : Any = self.prepare_image_inputs() __lowercase : Union[str, Any] = image_processor(__a , return_tensors="""np""" ) __lowercase : str = processor(images=__a , return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" __lowercase : Optional[Any] = self.get_image_processor() __lowercase : Dict = self.get_tokenizer() __lowercase : Tuple = OwlViTProcessor(tokenizer=__a , image_processor=__a ) __lowercase : int = """lower newer""" __lowercase : Optional[Any] = processor(text=__a , return_tensors="""np""" ) __lowercase : Optional[int] = tokenizer(__a , return_tensors="""np""" ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() ) def lowerCAmelCase ( self : List[Any] ) -> List[str]: """simple docstring""" __lowercase : List[str] = self.get_image_processor() __lowercase : Dict = self.get_tokenizer() __lowercase : Any = OwlViTProcessor(tokenizer=__a , image_processor=__a ) __lowercase : Any = """lower newer""" __lowercase : List[str] = self.prepare_image_inputs() __lowercase : Dict = processor(text=__a , images=__a ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(__a ): processor() def lowerCAmelCase ( self : Dict ) -> Dict: """simple docstring""" __lowercase : Optional[int] = """google/owlvit-base-patch32""" __lowercase : Dict = 
OwlViTProcessor.from_pretrained(__a ) __lowercase : str = ["""cat""", """nasa badge"""] __lowercase : int = processor(text=__a ) __lowercase : Any = 16 self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] ) self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) ) # test if it raises when no input is passed with pytest.raises(__a ): processor() def lowerCAmelCase ( self : Any ) -> Any: """simple docstring""" __lowercase : int = """google/owlvit-base-patch32""" __lowercase : Union[str, Any] = OwlViTProcessor.from_pretrained(__a ) __lowercase : Dict = [["""cat""", """nasa badge"""], ["""person"""]] __lowercase : List[str] = processor(text=__a ) __lowercase : int = 16 __lowercase : Tuple = len(__a ) __lowercase : List[Any] = max([len(__a ) for texts in input_texts] ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] ) self.assertEqual(inputs["""input_ids"""].shape , (batch_size * num_max_text_queries, seq_length) ) # test if it raises when no input is passed with pytest.raises(__a ): processor() def lowerCAmelCase ( self : Union[str, Any] ) -> Any: """simple docstring""" __lowercase : List[str] = """google/owlvit-base-patch32""" __lowercase : List[str] = OwlViTProcessor.from_pretrained(__a ) __lowercase : Optional[int] = ["""cat""", """nasa badge"""] __lowercase : List[str] = processor(text=__a ) __lowercase : Optional[int] = 16 __lowercase : str = inputs["""input_ids"""] __lowercase : Optional[int] = [ [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] ) self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) ) self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] ) self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] ) def lowerCAmelCase ( self : int ) -> Optional[Any]: """simple docstring""" __lowercase : Any = self.get_image_processor() __lowercase : Union[str, Any] = self.get_tokenizer() __lowercase : List[Any] = OwlViTProcessor(tokenizer=__a , image_processor=__a ) __lowercase : str = self.prepare_image_inputs() __lowercase : Dict = self.prepare_image_inputs() __lowercase : Any = processor(images=__a , query_images=__a ) self.assertListEqual(list(inputs.keys() ) , ["""query_pixel_values""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(__a ): processor() def lowerCAmelCase ( self : List[str] ) -> List[str]: """simple docstring""" __lowercase : List[str] = self.get_image_processor() __lowercase : str = self.get_tokenizer() __lowercase : List[str] = OwlViTProcessor(tokenizer=__a , image_processor=__a ) __lowercase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowercase : Optional[int] = processor.batch_decode(__a ) __lowercase : Any = tokenizer.batch_decode(__a ) self.assertListEqual(__a , __a )
306
import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class lowerCAmelCase : '''simple docstring''' def __init__( self : Any , __a : Tuple , __a : Optional[int]=13 , __a : int=7 , __a : List[str]=False , __a : Optional[int]=True , __a : Optional[int]=False , __a : Dict=True , __a : Optional[int]=33 , __a : Dict=32 , __a : Optional[int]=5 , __a : Union[str, Any]=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : List[str]=0.1 , __a : Dict=0.1 , __a : List[Any]=512 , __a : Any=16 , __a : Optional[Any]=2 , __a : List[Any]=0.02 , __a : int=3 , __a : Union[str, Any]=4 , __a : Optional[int]=None , ) -> Optional[int]: """simple docstring""" __lowercase : Tuple = parent __lowercase : int = batch_size __lowercase : Any = seq_length __lowercase : str = is_training __lowercase : str = use_input_mask __lowercase : Optional[int] = use_token_type_ids __lowercase : List[Any] = use_labels __lowercase : Optional[Any] = vocab_size __lowercase : int = hidden_size __lowercase : List[Any] = num_hidden_layers __lowercase : Dict = num_attention_heads __lowercase : Any = intermediate_size __lowercase : Dict = hidden_act __lowercase : Union[str, Any] = hidden_dropout_prob __lowercase : List[Any] = attention_probs_dropout_prob __lowercase : List[str] = max_position_embeddings __lowercase : Union[str, Any] = type_vocab_size __lowercase : Dict = type_sequence_label_size __lowercase : Union[str, Any] = initializer_range __lowercase : List[Any] = num_labels __lowercase : str = num_choices __lowercase : Tuple = scope def lowerCAmelCase ( self : Tuple ) -> List[Any]: """simple docstring""" __lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase : int = None if self.use_input_mask: __lowercase : Dict = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase : str = None __lowercase : Optional[Any] = None __lowercase : Tuple = None if self.use_labels: __lowercase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __lowercase : int = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self : Dict ) -> Optional[int]: """simple docstring""" return EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def lowerCAmelCase ( self : List[Any] , __a : int , __a : int , __a : Dict , __a : 
Union[str, Any] , __a : List[str] , __a : str ) -> Union[str, Any]: """simple docstring""" __lowercase : Optional[int] = EsmModel(config=__a ) model.to(__a ) model.eval() __lowercase : str = model(__a , attention_mask=__a ) __lowercase : List[Any] = model(__a ) __lowercase : Optional[int] = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase ( self : Union[str, Any] , __a : Dict , __a : List[Any] , __a : Tuple , __a : Union[str, Any] , __a : str , __a : Union[str, Any] ) -> List[str]: """simple docstring""" __lowercase : List[str] = EsmForMaskedLM(config=__a ) model.to(__a ) model.eval() __lowercase : int = model(__a , attention_mask=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Optional[int] , __a : Union[str, Any] , __a : List[Any] , __a : Tuple , __a : Tuple , __a : Optional[int] , __a : Tuple ) -> Union[str, Any]: """simple docstring""" __lowercase : Tuple = self.num_labels __lowercase : Any = EsmForTokenClassification(config=__a ) model.to(__a ) model.eval() __lowercase : Optional[Any] = model(__a , attention_mask=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" __lowercase : Any = self.prepare_config_and_inputs() ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) : List[str] = config_and_inputs __lowercase : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase ( __a , __a , unittest.TestCase ): '''simple docstring''' _A : Optional[Any] = False _A : Any = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) _A : Optional[Any] = () _A : List[Any] = ( { '''feature-extraction''': EsmModel, '''fill-mask''': EsmForMaskedLM, '''text-classification''': EsmForSequenceClassification, '''token-classification''': EsmForTokenClassification, '''zero-shot''': EsmForSequenceClassification, } if is_torch_available() else {} ) _A : Optional[Any] = True def lowerCAmelCase ( self : Tuple ) -> str: """simple docstring""" __lowercase : Optional[int] = EsmModelTester(self ) __lowercase : Tuple = ConfigTester(self , config_class=__a , hidden_size=37 ) def lowerCAmelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def lowerCAmelCase ( self : int ) -> Optional[Any]: """simple docstring""" __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def lowerCAmelCase ( self : Dict ) -> Optional[Any]: """simple docstring""" __lowercase : Any = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __lowercase : Union[str, Any] = type self.model_tester.create_and_check_model(*__a ) def lowerCAmelCase ( self : int ) -> Any: """simple docstring""" __lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__a ) def lowerCAmelCase ( self : Union[str, Any] ) -> Dict: """simple docstring""" __lowercase : int = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__a ) @slow def lowerCAmelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase : List[str] = EsmModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" __lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0] __lowercase : List[str] = EsmEmbeddings(config=__a ) __lowercase : Union[str, Any] = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) __lowercase : int = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) __lowercase : str = create_position_ids_from_input_ids(__a , model.padding_idx ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__a , __a ) ) ) def lowerCAmelCase ( self : Tuple ) -> Any: """simple docstring""" __lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0] __lowercase : Optional[Any] = EsmEmbeddings(config=__a ) __lowercase : Optional[int] = torch.empty(2 , 4 , 30 ) __lowercase : Tuple = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] __lowercase : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] ) __lowercase : Any = embeddings.create_position_ids_from_inputs_embeds(__a ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__a , __a ) ) ) @unittest.skip("""Esm does not support embedding resizing""" ) def lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" pass @unittest.skip("""Esm does not support embedding resizing""" ) def lowerCAmelCase ( self : List[str] ) -> Optional[int]: """simple docstring""" pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCAmelCase ( self : Optional[int] ) -> Tuple: """simple docstring""" pass @require_torch class lowerCAmelCase ( __a ): '''simple docstring''' @slow def lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" with torch.no_grad(): __lowercase : Tuple = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" ) model.eval() __lowercase : Tuple = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __lowercase : List[str] = model(__a )[0] __lowercase : Union[str, Any] = 33 __lowercase : Union[str, Any] = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape , __a ) __lowercase : List[Any] = torch.tensor( [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) ) @slow def lowerCAmelCase ( self : str ) -> Union[str, Any]: """simple docstring""" with torch.no_grad(): __lowercase : int = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" ) model.eval() __lowercase : int = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) __lowercase : Any = model(__a )[0] # compare the actual values for a slice. __lowercase : int = torch.tensor( [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )
306
1
from scipy.stats import spearmanr import datasets lowerCamelCase : List[str] = ''' The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. ''' lowerCamelCase : List[str] = ''' Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {\'spearmanr\': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results[\'spearmanr\']) -0.7 >>> print(round(results[\'spearmanr_pvalue\'], 2)) 0.19 ''' lowerCamelCase : Union[str, Any] = r'''\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase ( datasets.Metric ): '''simple docstring''' def lowerCAmelCase ( self : Tuple ) -> Any: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""float""" ), """references""": datasets.Value("""float""" ), } ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , ) def lowerCAmelCase ( self : List[Any] , __a : str , __a : Any , __a : Optional[int]=False ) -> List[str]: """simple docstring""" __lowercase : Optional[Any] = spearmanr(__a , __a ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
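# Direct scipy cross-check of what the metric above wraps; the inputs and the
# -0.7 value come from the docstring example in this record.
from scipy.stats import spearmanr

rho, p_value = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(round(rho, 2))  # -0.7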
306
def is_pentagonal(n: int) -> bool:
    """A number n is pentagonal iff (1 + sqrt(1 + 24 * n)) / 6 is a whole number."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Find the smallest pentagonal difference D = P_j - P_i with P_i + P_j also pentagonal."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
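# Hedged sanity checks for is_pentagonal above: the first pentagonal numbers
# are 1, 5, 12, 22, 35 (P_n = n(3n - 1) / 2).
assert all(is_pentagonal(p) for p in (1, 5, 12, 22, 35))
assert not any(is_pentagonal(q) for q in (2, 6, 13, 23, 36))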
306
1
from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")
    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
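# Hedged usage sketch for the helpers above; the toy vectors are illustrative.
import numpy as np

data = np.array([[0.0, 0.0], [1.0, 1.0]])
queries = np.array([[0.9, 1.0]])
print(similarity_search(data, queries))  # nearest neighbour [1.0, 1.0] at distance ~0.1
print(cosine_similarity(np.array([1.0, 0.0]), np.array([1.0, 1.0])))  # ~0.7071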
306
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
import collections
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import (
    is_pt_flax_cross_test,
    require_flax,
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available

from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester


if is_flax_available():
    from transformers import (
        FlaxBertModel,
        FlaxCLIPVisionModel,
        FlaxVisionTextDualEncoderModel,
        FlaxViTModel,
        VisionTextDualEncoderConfig,
        VisionTextDualEncoderProcessor,
    )
    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )

if is_torch_available():
    import torch

    from transformers import VisionTextDualEncoderModel

if is_vision_available():
    from PIL import Image


def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)


@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = FlaxVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def check_pt_flax_equivalence(self, fx_model, pt_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(fx_model, pt_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(fx_model, pt_model, inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)


@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
import argparse
import logging
import os
import time
import timeit

import datasets
import numpy as np
import pycuda.autoinit  # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions

import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate


TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()

# Required parameters
parser.add_argument(
    "--onnx_model_path",
    default=None,
    type=str,
    required=True,
    help="Path to ONNX model: ",
)
parser.add_argument(
    "--output_dir",
    default=None,
    type=str,
    required=True,
    help="The output directory where the model checkpoints and predictions will be written.",
)

# Other parameters
parser.add_argument(
    "--tokenizer_name",
    default="",
    type=str,
    required=True,
    help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
    "--version_2_with_negative",
    action="store_true",
    help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
    "--null_score_diff_threshold",
    type=float,
    default=0.0,
    help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
    "--max_seq_length",
    default=384,
    type=int,
    help=(
        "The maximum total input sequence length after WordPiece tokenization. Sequences "
        "longer than this will be truncated, and sequences shorter than this will be padded."
    ),
)
parser.add_argument(
    "--doc_stride",
    default=128,
    type=int,
    help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
    "--n_best_size",
    default=20,
    type=int,
    help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
    "--max_answer_length",
    default=30,
    type=int,
    help=(
        "The maximum length of an answer that can be generated. This is needed because the start "
        "and end predictions are not conditioned on one another."
    ),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
    "--dataset_name",
    type=str,
    default=None,
    required=True,
    help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
    "--dataset_config_name",
    type=str,
    default=None,
    help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
    "--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
    "--fp16",
    action="store_true",
    help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
    "--int8",
    action="store_true",
    help="Whether to use INT8",
)

args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported by this script."
        "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    )

logger.info("Training/evaluation parameters %s", args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"

# import ONNX file
if not os.path.exists("temp_engine"):
    os.makedirs("temp_engine")

EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
    network, TRT_LOGGER
) as parser:
    with open(args.onnx_model_path, "rb") as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))

    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)

        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, "wb") as f:
            f.write(engine.serialize())


def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)

    # start time
    start_time = time.time()

    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time


# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)

# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
    datasets.utils.logging.set_verbosity_warning()
    transformers.utils.logging.set_verbosity_info()
else:
    datasets.utils.logging.set_verbosity_error()
    transformers.utils.logging.set_verbosity_error()

# If passed along, set the training seed now.
if args.seed is not None:
    set_seed(args.seed)

# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
    raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.

# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.

column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
        f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )

max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)


def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples


eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)


def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)


metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")

# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
    f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inferrence
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()

    # Evaluation
    logger.info("***** Running Evaluation *****")
    logger.info(f"  Num examples = {len(eval_dataset)}")
    logger.info(f"  Batch size = {args.per_device_eval_batch_size}")

    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)

prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"Evaluation metrics: {eval_metric}")
def __get_demo_graph(index):
    return [
        {
            0: [1, 2],
            1: [0, 2],
            2: [0, 1, 3, 5],
            3: [2, 4],
            4: [3],
            5: [2, 6, 8],
            6: [5, 7],
            7: [6, 8],
            8: [5, 7],
        },
        {
            0: [6],
            1: [9],
            2: [4, 5],
            3: [4],
            4: [2, 3],
            5: [2],
            6: [0, 7],
            7: [6],
            8: [],
            9: [1],
        },
        {
            0: [4],
            1: [6],
            2: [],
            3: [5, 6, 7],
            4: [0, 6],
            5: [3, 8, 9],
            6: [1, 3, 4, 7],
            7: [3, 6, 8, 9],
            8: [5, 7],
            9: [5, 7],
        },
        {
            0: [1, 3],
            1: [0, 2, 4],
            2: [1, 3, 4],
            3: [0, 2, 4],
            4: [1, 2, 3],
        },
    ][index]


def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges


if __name__ == "__main__":
    import doctest

    doctest.testmod()
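# A minimal usage sketch of the bridge finder above: demo graph 0 is two triangle
# clusters joined through vertices 2, 3, and 5, so its bridges are (2, 3), (3, 4),
# and (2, 5). The exact list order depends on DFS traversal order, so treat the
# printed ordering below as illustrative rather than guaranteed.
if __name__ == "__main__":
    demo_graph = __get_demo_graph(0)
    print(sorted(compute_bridges(demo_graph)))  # [(2, 3), (2, 5), (3, 4)]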
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}


class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
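# A small usage sketch of the config class above. The override values here are
# hypothetical (a toy-sized model for illustration), not the released 54B
# checkpoint's settings; unspecified fields fall back to the defaults defined above.
if __name__ == "__main__":
    config = NllbMoeConfig(d_model=256, encoder_layers=2, decoder_layers=2, num_experts=4)
    print(config.model_type, config.num_experts)  # nllb-moe 4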
import hashlib
import unittest
from typing import Dict

import numpy as np

from transformers import (
    MODEL_FOR_MASK_GENERATION_MAPPING,
    TF_MODEL_FOR_MASK_GENERATION_MAPPING,
    is_vision_available,
    pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}


@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.021},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
                {"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.9967},
                {"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.993},
                {"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.9909},
                {"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.9879},
                {"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.9834},
                {"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.9716},
                {"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.9612},
                {"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.9599},
                {"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.9552},
                {"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.9532},
                {"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.9516},
                {"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.9499},
                {"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.9483},
                {"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.9464},
                {"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.9408},
                {"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.9335},
                {"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.9326},
                {"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.9262},
                {"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.8999},
                {"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.8986},
                {"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.8984},
                {"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.8873},
                {"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.8871},
            ],
        )
        # fmt: on

    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)

        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0210},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
            ],
        )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import YolosImageProcessor


class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width for images fed to YolosImageProcessor,
        assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]

        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """
    Returns the prime factors of n in ascending order.

    >>> prime_factors(100)
    [2, 2, 5, 5]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
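# A short usage sketch of the trial-division factorizer above: each factor is
# divided out repeatedly, so repeated primes appear once per multiplicity,
# and a prime input yields a single-element list.
if __name__ == "__main__":
    print(prime_factors(360))  # [2, 2, 2, 3, 3, 5]  (360 = 2^3 * 3^2 * 5)
    print(prime_factors(97))  # [97]  (97 is prime)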