code
stringlengths
86
54.5k
code_codestyle
int64
0
371
style_context
stringlengths
87
49.2k
style_context_codestyle
int64
0
349
label
int64
0
1
'''simple docstring''' a_ : str = """ # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git """ a_ : Any = [{"""type""": """code""", """content""": INSTALL_CONTENT}] a_ : int = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
55
from math import pi def lowerCamelCase__ ( snake_case_ : int , snake_case_ : int ) -> float: return 2 * pi * radius * (angle / 360) if __name__ == "__main__": print(arc_length(90, 10))
24
0
'''simple docstring''' import inspect import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import CvtForImageClassification, CvtModel from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): def __magic_name__ ( self : List[Any] ) -> Dict: SCREAMING_SNAKE_CASE__ : List[str] =self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__lowercase , '''embed_dim''' ) ) self.parent.assertTrue(hasattr(__lowercase , '''num_heads''' ) ) class __SCREAMING_SNAKE_CASE : def __init__( self : List[str] , __lowercase : Optional[Any] , __lowercase : Any=13 , __lowercase : Optional[Any]=64 , __lowercase : Optional[int]=3 , __lowercase : List[Any]=[16, 48, 96] , __lowercase : str=[1, 3, 6] , __lowercase : Any=[1, 2, 10] , __lowercase : Optional[Any]=[7, 3, 3] , __lowercase : Optional[int]=[4, 2, 2] , __lowercase : str=[2, 1, 1] , __lowercase : Union[str, Any]=[2, 2, 2] , __lowercase : int=[False, False, True] , __lowercase : str=[0.0, 0.0, 0.0] , __lowercase : List[str]=0.02 , __lowercase : Tuple=1e-12 , __lowercase : List[Any]=True , __lowercase : Dict=True , __lowercase : str=2 , ) -> str: SCREAMING_SNAKE_CASE__ : List[Any] =parent SCREAMING_SNAKE_CASE__ : List[str] =batch_size SCREAMING_SNAKE_CASE__ : Any =image_size SCREAMING_SNAKE_CASE__ : Any =patch_sizes SCREAMING_SNAKE_CASE__ : Dict =patch_stride SCREAMING_SNAKE_CASE__ : List[Any] =patch_padding SCREAMING_SNAKE_CASE__ : Dict =is_training SCREAMING_SNAKE_CASE__ : Any =use_labels SCREAMING_SNAKE_CASE__ : int =num_labels SCREAMING_SNAKE_CASE__ : str =num_channels SCREAMING_SNAKE_CASE__ : str =embed_dim SCREAMING_SNAKE_CASE__ : str =num_heads SCREAMING_SNAKE_CASE__ : Tuple =stride_kv SCREAMING_SNAKE_CASE__ : Tuple =depth SCREAMING_SNAKE_CASE__ : Dict =cls_token SCREAMING_SNAKE_CASE__ : Optional[int] =attention_drop_rate SCREAMING_SNAKE_CASE__ : Tuple =initializer_range SCREAMING_SNAKE_CASE__ : List[Any] =layer_norm_eps def __magic_name__ ( self : Union[str, Any] ) -> Any: SCREAMING_SNAKE_CASE__ : Dict =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : List[Any] =None if self.use_labels: SCREAMING_SNAKE_CASE__ : str =ids_tensor([self.batch_size] , self.num_labels ) SCREAMING_SNAKE_CASE__ : Any =self.get_config() return config, pixel_values, labels def __magic_name__ ( self : int ) -> Dict: return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def __magic_name__ ( self : Tuple , __lowercase : int , __lowercase : List[str] , __lowercase : Dict ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : Union[str, Any] =CvtModel(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Tuple =model(__lowercase ) SCREAMING_SNAKE_CASE__ : Tuple =(self.image_size, self.image_size) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_size[0], image_size[1] for i in range(len(self.depth ) ): SCREAMING_SNAKE_CASE__ : List[Any] =floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) SCREAMING_SNAKE_CASE__ : Any =floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def __magic_name__ ( self : Tuple , __lowercase : int , __lowercase : List[str] , __lowercase : Dict ) -> str: SCREAMING_SNAKE_CASE__ : List[Any] =self.num_labels SCREAMING_SNAKE_CASE__ : Optional[Any] =CvtForImageClassification(__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Any =model(__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __magic_name__ ( self : List[Any] ) -> Tuple: SCREAMING_SNAKE_CASE__ : Dict =self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =config_and_inputs SCREAMING_SNAKE_CASE__ : Dict ={'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ): snake_case_ = (CvtModel, CvtForImageClassification) if is_torch_available() else () snake_case_ = ( {"""feature-extraction""": CvtModel, """image-classification""": CvtForImageClassification} if is_torch_available() else {} ) snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = False def __magic_name__ ( self : int ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Union[str, Any] =CvtModelTester(self ) SCREAMING_SNAKE_CASE__ : Dict =ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37 ) def __magic_name__ ( self : Any ) -> List[Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __magic_name__ ( self : str ) -> int: return @unittest.skip(reason='''Cvt does not output attentions''' ) def __magic_name__ ( self : Tuple ) -> Any: pass @unittest.skip(reason='''Cvt does not use inputs_embeds''' ) def __magic_name__ ( self : Optional[Any] ) -> str: pass @unittest.skip(reason='''Cvt does not support input and output embeddings''' ) def __magic_name__ ( self : List[Any] ) -> str: pass def __magic_name__ ( self : Optional[Any] ) -> int: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : str =model_class(__lowercase ) SCREAMING_SNAKE_CASE__ : Tuple =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE__ : Union[str, Any] =[*signature.parameters.keys()] SCREAMING_SNAKE_CASE__ : Tuple =['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowercase ) def __magic_name__ ( self : Any ) -> Dict: SCREAMING_SNAKE_CASE__ : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowercase ) def __magic_name__ ( self : Any ) -> Tuple: def check_hidden_states_output(__lowercase : Dict , __lowercase : List[Any] , __lowercase : Any ): SCREAMING_SNAKE_CASE__ : Union[str, Any] =model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE__ : List[Any] =model(**self._prepare_for_class(__lowercase , __lowercase ) ) SCREAMING_SNAKE_CASE__ : int =outputs.hidden_states SCREAMING_SNAKE_CASE__ : Dict =len(self.model_tester.depth ) self.assertEqual(len(__lowercase ) , __lowercase ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Any =True check_hidden_states_output(__lowercase , __lowercase , __lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE__ : List[str] =True check_hidden_states_output(__lowercase , __lowercase , __lowercase ) def __magic_name__ ( self : Any ) -> int: SCREAMING_SNAKE_CASE__ : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowercase ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __magic_name__ ( self : str ) -> List[str]: pass @slow def __magic_name__ ( self : Any ) -> Optional[int]: for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ : str =CvtModel.from_pretrained(__lowercase ) self.assertIsNotNone(__lowercase ) def _a( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @cached_property def __magic_name__ ( self : int ) -> Tuple: return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def __magic_name__ ( self : List[Any] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : str =CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =self.default_image_processor SCREAMING_SNAKE_CASE__ : List[str] =prepare_img() SCREAMING_SNAKE_CASE__ : int =image_processor(images=__lowercase , return_tensors='''pt''' ).to(__lowercase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Dict =model(**__lowercase ) # verify the logits SCREAMING_SNAKE_CASE__ : Optional[int] =torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowercase ) SCREAMING_SNAKE_CASE__ : str =torch.tensor([0.9285, 0.9015, -0.3150] ).to(__lowercase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1e-4 ) )
222
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): snake_case_ = """facebook/bart-large-mnli""" snake_case_ = ( """This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which """ """should be the text to classify, and `labels`, which should be the list of labels to use for classification. """ """It returns the most likely label in the list of provided `labels` for the input text.""" ) snake_case_ = """text_classifier""" snake_case_ = AutoTokenizer snake_case_ = AutoModelForSequenceClassification snake_case_ = ["""text""", ["""text"""]] snake_case_ = ["""text"""] def __magic_name__ ( self : Any ) -> Any: super().setup() SCREAMING_SNAKE_CASE__ : int =self.model.config SCREAMING_SNAKE_CASE__ : int =-1 for idx, label in config.idalabel.items(): if label.lower().startswith('''entail''' ): SCREAMING_SNAKE_CASE__ : Any =int(__lowercase ) if self.entailment_id == -1: raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' ) def __magic_name__ ( self : str , __lowercase : Any , __lowercase : Optional[Any] ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : Dict =labels return self.pre_processor( [text] * len(__lowercase ) , [F"This example is {label}" for label in labels] , return_tensors='''pt''' , padding='''max_length''' , ) def __magic_name__ ( self : List[Any] , __lowercase : str ) -> List[Any]: SCREAMING_SNAKE_CASE__ : int =outputs.logits SCREAMING_SNAKE_CASE__ : Any =torch.argmax(logits[:, 2] ).item() return self._labels[label_id]
222
1
"""simple docstring""" import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class __UpperCamelCase ( a__ ): lowerCamelCase : Any =(DPMSolverSinglestepScheduler,) lowerCamelCase : str =(("""num_inference_steps""", 25),) def __a ( self , **lowerCAmelCase__ ) -> str: a : List[Any] = { "num_train_timesteps": 1000, "beta_start": 0.0_001, "beta_end": 0.02, "beta_schedule": "linear", "solver_order": 2, "prediction_type": "epsilon", "thresholding": False, "sample_max_value": 1.0, "algorithm_type": "dpmsolver++", "solver_type": "midpoint", "lambda_min_clipped": -float("inf" ), "variance_type": None, } config.update(**lowerCAmelCase__ ) return config def __a ( self , lowerCAmelCase__=0 , **lowerCAmelCase__ ) -> Union[str, Any]: a : Any = dict(self.forward_default_kwargs ) a : Dict = kwargs.pop("num_inference_steps" , lowerCAmelCase__ ) a : List[str] = self.dummy_sample a : List[Any] = 0.1 * sample a : int = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: a : Tuple = self.get_scheduler_config(**lowerCAmelCase__ ) a : Dict = scheduler_class(**lowerCAmelCase__ ) scheduler.set_timesteps(lowerCAmelCase__ ) # copy over dummy past residuals a : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowerCAmelCase__ ) a : str = scheduler_class.from_pretrained(lowerCAmelCase__ ) new_scheduler.set_timesteps(lowerCAmelCase__ ) # copy over dummy past residuals a : int = dummy_past_residuals[: new_scheduler.config.solver_order] a, a : Union[str, Any] = sample, sample for t in range(lowerCAmelCase__ , time_step + scheduler.config.solver_order + 1 ): a : List[str] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample a : Optional[Any] = new_scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def __a ( self ) -> Tuple: pass def __a ( self , lowerCAmelCase__=0 , **lowerCAmelCase__ ) -> Tuple: a : Union[str, Any] = dict(self.forward_default_kwargs ) a : int = kwargs.pop("num_inference_steps" , lowerCAmelCase__ ) a : List[str] = self.dummy_sample a : Optional[int] = 0.1 * sample a : Tuple = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: a : Dict = self.get_scheduler_config() a : str = scheduler_class(**lowerCAmelCase__ ) scheduler.set_timesteps(lowerCAmelCase__ ) # copy over dummy past residuals (must be after setting timesteps) a : Tuple = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowerCAmelCase__ ) a : List[str] = scheduler_class.from_pretrained(lowerCAmelCase__ ) # copy over dummy past residuals new_scheduler.set_timesteps(lowerCAmelCase__ ) # copy over dummy past residual (must be after setting timesteps) a : List[str] = dummy_past_residuals[: new_scheduler.config.solver_order] a : List[str] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample a : Optional[int] = new_scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def __a ( self , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Dict: if scheduler is None: a : Dict = self.scheduler_classes[0] a : int = self.get_scheduler_config(**lowerCAmelCase__ ) a : Tuple = scheduler_class(**lowerCAmelCase__ ) a : List[Any] = self.scheduler_classes[0] a : Optional[Any] = self.get_scheduler_config(**lowerCAmelCase__ ) a : List[Any] = scheduler_class(**lowerCAmelCase__ ) a : int = 10 a : str = self.dummy_model() a : Union[str, Any] = self.dummy_sample_deter scheduler.set_timesteps(lowerCAmelCase__ ) for i, t in enumerate(scheduler.timesteps ): a : Optional[int] = model(lowerCAmelCase__ , lowerCAmelCase__ ) a : Tuple = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample return sample def __a ( self ) -> str: a : Union[str, Any] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) a : Optional[int] = 50 a : List[str] = self.dummy_model() a : Union[str, Any] = self.dummy_sample_deter scheduler.set_timesteps(lowerCAmelCase__ ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): a : Any = model(lowerCAmelCase__ , lowerCAmelCase__ ) a : Union[str, Any] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample a : Union[str, Any] = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_mean.item() - 0.2_574 ) < 1E-3 def __a ( self ) -> Union[str, Any]: for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=lowerCAmelCase__ ) def __a ( self ) -> Optional[int]: # make sure that iterating over schedulers with same config names gives same results # for defaults a : Dict = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) a : List[Any] = self.full_loop(scheduler=lowerCAmelCase__ ) a : int = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_mean.item() - 0.2_791 ) < 1E-3 a : List[Any] = DEISMultistepScheduler.from_config(scheduler.config ) a : List[str] = DPMSolverMultistepScheduler.from_config(scheduler.config ) a : int = UniPCMultistepScheduler.from_config(scheduler.config ) a : Any = DPMSolverSinglestepScheduler.from_config(scheduler.config ) a : List[Any] = self.full_loop(scheduler=lowerCAmelCase__ ) a : int = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_mean.item() - 0.2_791 ) < 1E-3 def __a ( self ) -> Optional[int]: self.check_over_configs(thresholding=lowerCAmelCase__ ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=lowerCAmelCase__ , prediction_type=lowerCAmelCase__ , sample_max_value=lowerCAmelCase__ , algorithm_type="dpmsolver++" , solver_order=lowerCAmelCase__ , solver_type=lowerCAmelCase__ , ) def __a ( self ) -> Union[str, Any]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCAmelCase__ ) def __a ( self ) -> str: for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=lowerCAmelCase__ , solver_type=lowerCAmelCase__ , prediction_type=lowerCAmelCase__ , algorithm_type=lowerCAmelCase__ , ) a : Optional[Any] = self.full_loop( solver_order=lowerCAmelCase__ , solver_type=lowerCAmelCase__ , prediction_type=lowerCAmelCase__ , algorithm_type=lowerCAmelCase__ , ) assert not torch.isnan(lowerCAmelCase__ ).any(), "Samples have nan numbers" def __a ( self ) -> Any: self.check_over_configs(lower_order_final=lowerCAmelCase__ ) self.check_over_configs(lower_order_final=lowerCAmelCase__ ) def __a ( self ) -> List[Any]: self.check_over_configs(lambda_min_clipped=-float("inf" ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def __a ( self ) -> Tuple: self.check_over_configs(variance_type=lowerCAmelCase__ ) self.check_over_configs(variance_type="learned_range" ) def __a ( self ) -> Any: for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=lowerCAmelCase__ , time_step=0 ) def __a ( self ) -> List[str]: a : Any = self.full_loop() a : Union[str, Any] = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_mean.item() - 0.2_791 ) < 1E-3 def __a ( self ) -> Any: a : Optional[int] = self.full_loop(use_karras_sigmas=lowerCAmelCase__ ) a : int = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_mean.item() - 0.2_248 ) < 1E-3 def __a ( self ) -> Optional[int]: a : Optional[Any] = self.full_loop(prediction_type="v_prediction" ) a : Optional[int] = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_mean.item() - 0.1_453 ) < 1E-3 def __a ( self ) -> Optional[int]: a : List[str] = self.full_loop(prediction_type="v_prediction" , use_karras_sigmas=lowerCAmelCase__ ) a : int = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_mean.item() - 0.0_649 ) < 1E-3 def __a ( self ) -> Any: a : int = self.scheduler_classes[0] a : Optional[Any] = self.get_scheduler_config(thresholding=lowerCAmelCase__ , dynamic_thresholding_ratio=0 ) a : Tuple = scheduler_class(**lowerCAmelCase__ ) a : Optional[int] = 10 a : List[Any] = self.dummy_model() a : List[str] = self.dummy_sample_deter.half() scheduler.set_timesteps(lowerCAmelCase__ ) for i, t in enumerate(scheduler.timesteps ): a : Any = model(lowerCAmelCase__ , lowerCAmelCase__ ) a : Any = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample assert sample.dtype == torch.floataa
105
"""simple docstring""" def _SCREAMING_SNAKE_CASE ( _lowercase : float , _lowercase : float ) ->float: '''simple docstring''' return price * (1 + tax_rate) if __name__ == "__main__": print(F'''{price_plus_tax(100, 0.25) = }''') print(F'''{price_plus_tax(125.50, 0.05) = }''')
105
1
'''simple docstring''' import numpy as np import qiskit def lowerCamelCase ( UpperCAmelCase__ : int = 8 , UpperCAmelCase__ : int | None = None ) -> str: lowercase_ : Tuple = np.random.default_rng(seed=UpperCAmelCase__ ) # Roughly 25% of the qubits will contribute to the key. # So we take more than we need. lowercase_ : Dict = 6 * key_len # Measurement basis for Alice's qubits. lowercase_ : int = rng.integers(2 , size=UpperCAmelCase__ ) # The set of states Alice will prepare. lowercase_ : Dict = rng.integers(2 , size=UpperCAmelCase__ ) # Measurement basis for Bob's qubits. lowercase_ : Tuple = rng.integers(2 , size=UpperCAmelCase__ ) # Quantum Circuit to simulate BB84 lowercase_ : int = qiskit.QuantumCircuit(UpperCAmelCase__ , name="""BB84""" ) # Alice prepares her qubits according to rules above. for index, _ in enumerate(UpperCAmelCase__ ): if alice_state[index] == 1: bbaa_circ.x(UpperCAmelCase__ ) if alice_basis[index] == 1: bbaa_circ.h(UpperCAmelCase__ ) bbaa_circ.barrier() # Bob measures the received qubits according to rules above. for index, _ in enumerate(UpperCAmelCase__ ): if bob_basis[index] == 1: bbaa_circ.h(UpperCAmelCase__ ) bbaa_circ.barrier() bbaa_circ.measure_all() # Simulate the quantum circuit. lowercase_ : int = qiskit.Aer.get_backend("""aer_simulator""" ) # We only need to run one shot because the key is unique. # Multiple shots will produce the same key. lowercase_ : List[str] = qiskit.execute(UpperCAmelCase__ , UpperCAmelCase__ , shots=1 , seed_simulator=UpperCAmelCase__ ) # Returns the result of measurement. lowercase_ : Any = job.result().get_counts(UpperCAmelCase__ ).most_frequent() # Extracting the generated key from the simulation results. # Only keep measurement results where Alice and Bob chose the same basis. lowercase_ : Union[str, Any] = """""".join( [ result_bit for alice_basis_bit, bob_basis_bit, result_bit in zip( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) if alice_basis_bit == bob_basis_bit ] ) # Get final key. Pad with 0 if too short, otherwise truncate. lowercase_ : Union[str, Any] = gen_key[:key_len] if len(UpperCAmelCase__ ) >= key_len else gen_key.ljust(UpperCAmelCase__ , """0""" ) return key if __name__ == "__main__": print(f"""The generated key is : {bbaa(8, seed=0)}""") from doctest import testmod testmod()
21
'''simple docstring''' import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def lowerCamelCase ( UpperCAmelCase__ : Optional[int] ) -> List[str]: if isinstance(UpperCAmelCase__ , collections.abc.Iterable ): return x return (x, x) @require_flax class __magic_name__ : def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Any , lowercase_ : str ): pass def SCREAMING_SNAKE_CASE_ ( self : str ): pass def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): pass def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : np.ndarray , lowercase_ : np.ndarray , lowercase_ : float ): lowercase_ : Optional[Any] = np.abs((a - b) ).max() self.assertLessEqual(lowercase_ , lowercase_ , f'''Difference between torch and flax is {diff} (>= {tol}).''' ) def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Tuple=None , **lowercase_ : Optional[int] ): lowercase_ : Any = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ ) lowercase_ : Any = FlaxVisionTextDualEncoderModel(lowercase_ ) lowercase_ : List[Any] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : List[Any]=None , **lowercase_ : Tuple ): lowercase_ , lowercase_ : Any = self.get_vision_text_model(lowercase_ , lowercase_ ) lowercase_ : Optional[int] = {"""vision_model""": vision_model, """text_model""": text_model} lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ ) lowercase_ : List[Any] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Optional[Any]=None , **lowercase_ : int ): lowercase_ , lowercase_ : Union[str, Any] = self.get_vision_text_model(lowercase_ , lowercase_ ) lowercase_ : Optional[Any] = {"""vision_model""": vision_model, """text_model""": text_model} lowercase_ : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ ) lowercase_ : Tuple = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) lowercase_ : Any = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowercase_ ) lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ ) lowercase_ : List[str] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) lowercase_ : Union[str, Any] = after_output[0] lowercase_ : str = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowercase_ , 1E-3 ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Dict=None , **lowercase_ : Optional[Any] ): lowercase_ , lowercase_ : Optional[int] = self.get_vision_text_model(lowercase_ , lowercase_ ) lowercase_ : Dict = {"""vision_model""": vision_model, """text_model""": text_model} lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ ) lowercase_ : Optional[int] = model( input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , output_attentions=lowercase_ ) lowercase_ : Tuple = output.vision_model_output.attentions self.assertEqual(len(lowercase_ ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) lowercase_ : List[str] = to_atuple(vision_model.config.image_size ) lowercase_ : Optional[Any] = to_atuple(vision_model.config.patch_size ) lowercase_ : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) lowercase_ : Optional[Any] = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) lowercase_ : Union[str, Any] = output.text_model_output.attentions self.assertEqual(len(lowercase_ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : int ): pt_model.to(lowercase_ ) pt_model.eval() # prepare inputs lowercase_ : int = inputs_dict lowercase_ : Tuple = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): lowercase_ : str = pt_model(**lowercase_ ).to_tuple() lowercase_ : Optional[Any] = fx_model(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(lowercase_ , pt_output.numpy() , 4E-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowercase_ ) lowercase_ : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ , from_pt=lowercase_ ) lowercase_ : Dict = fx_model_loaded(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(lowercase_ , pt_output.numpy() , 4E-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowercase_ ) lowercase_ : Union[str, Any] = VisionTextDualEncoderModel.from_pretrained(lowercase_ , from_flax=lowercase_ ) pt_model_loaded.to(lowercase_ ) pt_model_loaded.eval() with torch.no_grad(): lowercase_ : List[Any] = pt_model_loaded(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(lowercase_ , pt_output_loaded.numpy() , 4E-2 ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Union[str, Any] ): lowercase_ : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ ) lowercase_ : List[Any] = VisionTextDualEncoderModel(lowercase_ ) lowercase_ : Union[str, Any] = FlaxVisionTextDualEncoderModel(lowercase_ ) lowercase_ : Optional[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowercase_ ) lowercase_ : Tuple = fx_state self.check_pt_flax_equivalence(lowercase_ , lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : List[Any] ): lowercase_ : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ ) lowercase_ : int = VisionTextDualEncoderModel(lowercase_ ) lowercase_ : Dict = FlaxVisionTextDualEncoderModel(lowercase_ ) lowercase_ : Optional[Any] = load_flax_weights_in_pytorch_model(lowercase_ , fx_model.params ) self.check_pt_flax_equivalence(lowercase_ , lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): lowercase_ : Tuple = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : List[Any] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : List[Any] = self.prepare_config_and_inputs() self.check_save_load(**lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Union[str, Any] = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**lowercase_ ) @is_pt_flax_cross_test def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Tuple = self.prepare_config_and_inputs() lowercase_ : List[Any] = config_inputs_dict.pop("""vision_config""" ) lowercase_ : int = config_inputs_dict.pop("""text_config""" ) lowercase_ : Optional[int] = config_inputs_dict self.check_equivalence_pt_to_flax(lowercase_ , lowercase_ , lowercase_ ) self.check_equivalence_flax_to_pt(lowercase_ , lowercase_ , lowercase_ ) @slow def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ , lowercase_ : str = self.get_pretrained_model_and_inputs() lowercase_ : Dict = model_a(**lowercase_ ) lowercase_ : str = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(lowercase_ ) lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ ) lowercase_ : str = model_a(**lowercase_ ) lowercase_ : Union[str, Any] = after_outputs[0] lowercase_ : Any = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowercase_ , 1E-5 ) @require_flax class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowercase_ , text_from_pt=lowercase_ , ) lowercase_ : List[str] = 13 lowercase_ : Optional[Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) lowercase_ : Any = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) lowercase_ : str = random_attention_mask([batch_size, 4] ) lowercase_ : List[str] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Tuple ): lowercase_ : Union[str, Any] = FlaxViTModel(lowercase_ ) lowercase_ : Dict = FlaxBertModel(lowercase_ ) return vision_model, text_model def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : Any = FlaxViTModelTester(self ) lowercase_ : Optional[Any] = FlaxBertModelTester(self ) lowercase_ : Dict = vit_model_tester.prepare_config_and_inputs() lowercase_ : Optional[Any] = bert_model_tester.prepare_config_and_inputs() lowercase_ , lowercase_ : List[str] = vision_config_and_inputs lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : int ): lowercase_ : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowercase_ , text_from_pt=lowercase_ , ) lowercase_ : List[str] = 13 lowercase_ : Optional[Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) lowercase_ : int = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) lowercase_ : Tuple = random_attention_mask([batch_size, 4] ) lowercase_ : Union[str, Any] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] ): lowercase_ : Tuple = FlaxCLIPVisionModel(lowercase_ ) lowercase_ : Any = FlaxBertModel(lowercase_ ) return vision_model, text_model def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Union[str, Any] = FlaxCLIPVisionModelTester(self ) lowercase_ : Tuple = FlaxBertModelTester(self ) lowercase_ : Union[str, Any] = clip_model_tester.prepare_config_and_inputs() lowercase_ : Any = bert_model_tester.prepare_config_and_inputs() lowercase_ , lowercase_ : Optional[Any] = vision_config_and_inputs lowercase_ , lowercase_ , lowercase_ , lowercase_ : str = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class __magic_name__ ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 ) lowercase_ : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" ) lowercase_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) lowercase_ : Optional[int] = processor( text=["""una foto di un gatto""", """una foto di un cane"""] , images=lowercase_ , padding=lowercase_ , return_tensors="""np""" ) lowercase_ : List[str] = model(**lowercase_ ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) lowercase_ : Optional[Any] = np.array([[1.2_28_47_27, 0.3_10_41_22]] ) self.assertTrue(np.allclose(outputs.logits_per_image , lowercase_ , atol=1E-3 ) )
21
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) a ={ """configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""], """processing_layoutlmv2""": ["""LayoutLMv2Processor"""], """tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a =["""LayoutLMv2TokenizerFast"""] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a =["""LayoutLMv2FeatureExtractor"""] a =["""LayoutLMv2ImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a =[ """LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""", """LayoutLMv2ForQuestionAnswering""", """LayoutLMv2ForSequenceClassification""", """LayoutLMv2ForTokenClassification""", """LayoutLMv2Layer""", """LayoutLMv2Model""", """LayoutLMv2PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys a =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
73
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> float: __lowerCamelCase : Dict = [redshift, radiation_density, matter_density, dark_energy] if any(p < 0 for p in parameters ): raise ValueError('All input parameters must be positive' ) if any(p > 1 for p in parameters[1:4] ): raise ValueError('Relative densities cannot be greater than one' ) else: __lowerCamelCase : Dict = 1 - (matter_density + radiation_density + dark_energy) __lowerCamelCase : Union[str, Any] = ( radiation_density * (redshift + 1) ** 4 + matter_density * (redshift + 1) ** 3 + curvature * (redshift + 1) ** 2 + dark_energy ) __lowerCamelCase : List[Any] = hubble_constant * e_a ** (1 / 2) return hubble if __name__ == "__main__": import doctest # run doctest doctest.testmod() # demo LCDM approximation a =0.3 print( hubble_parameter( hubble_constant=68.3, radiation_density=1E-4, matter_density=matter_density, dark_energy=1 - matter_density, redshift=0, ) )
73
1
'''simple docstring''' import math def __magic_name__ ( __UpperCAmelCase ) -> bool: '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5, int(math.sqrt(__UpperCAmelCase ) + 1 ), 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def __magic_name__ ( __UpperCAmelCase = 0.1 ) -> int: '''simple docstring''' snake_case_ = 3 snake_case_ = 3 while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1 ): primes += is_prime(__UpperCAmelCase ) j += 2 return j if __name__ == "__main__": import doctest doctest.testmod()
362
'''simple docstring''' import math from collections.abc import Callable def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> float: '''simple docstring''' snake_case_ = xa snake_case_ = xa while True: if x_n == x_na or function(__UpperCAmelCase ) == function(__UpperCAmelCase ): raise ZeroDivisionError('''float division by zero, could not find root''' ) snake_case_ = x_na - ( function(__UpperCAmelCase ) / ((function(__UpperCAmelCase ) - function(__UpperCAmelCase )) / (x_na - x_n)) ) if abs(x_na - x_na ) < 10**-5: return x_na snake_case_ = x_na snake_case_ = x_na def __magic_name__ ( __UpperCAmelCase ) -> float: '''simple docstring''' return math.pow(__UpperCAmelCase, 3 ) - (2 * x) - 5 if __name__ == "__main__": print(intersection(f, 3, 3.5))
72
0
from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = False, lowerCamelCase__ = False, lowerCamelCase__ = None, **lowerCamelCase__, ): super().__init__( lowerCamelCase__, split=lowerCamelCase__, features=lowerCamelCase__, cache_dir=lowerCamelCase__, keep_in_memory=lowerCamelCase__, streaming=lowerCamelCase__, num_proc=lowerCamelCase__, **lowerCamelCase__, ) A : str = path_or_paths if isinstance(lowerCamelCase__, lowerCamelCase__ ) else {self.split: path_or_paths} A : List[Any] = Text( cache_dir=lowerCamelCase__, data_files=lowerCamelCase__, features=lowerCamelCase__, **lowerCamelCase__, ) def _lowerCAmelCase ( self ): # Build iterable dataset if self.streaming: A : Dict = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: A : Optional[Any] = None A : List[str] = None A : Union[str, Any] = None A : str = None self.builder.download_and_prepare( download_config=lowerCamelCase__, download_mode=lowerCamelCase__, verification_mode=lowerCamelCase__, base_path=lowerCamelCase__, num_proc=self.num_proc, ) A : List[Any] = self.builder.as_dataset( split=self.split, verification_mode=lowerCamelCase__, in_memory=self.keep_in_memory ) return dataset
116
import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class SCREAMING_SNAKE_CASE__ ( tf.keras.optimizers.schedules.LearningRateSchedule ): '''simple docstring''' def __init__( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = 1.0, lowerCamelCase__ = None, ): super().__init__() A : Union[str, Any] = initial_learning_rate A : List[Any] = warmup_steps A : int = power A : Optional[int] = decay_schedule_fn A : int = name def __call__( self, lowerCamelCase__ ): with tf.name_scope(self.name or """WarmUp""" ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. A : str = tf.cast(lowerCamelCase__, tf.floataa ) A : List[Any] = tf.cast(self.warmup_steps, tf.floataa ) A : Dict = global_step_float / warmup_steps_float A : Union[str, Any] = self.initial_learning_rate * tf.math.pow(lowerCamelCase__, self.power ) return tf.cond( global_step_float < warmup_steps_float, lambda: warmup_learning_rate, lambda: self.decay_schedule_fn(step - self.warmup_steps ), name=lowerCamelCase__, ) def _lowerCAmelCase ( self ): return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 0.0 , _lowerCAmelCase = 0.9 , _lowerCAmelCase = 0.999 , _lowerCAmelCase = 1e-8 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = 0.0 , _lowerCAmelCase = 1.0 , _lowerCAmelCase = None , ) -> Union[str, Any]: """simple docstring""" A : Optional[int] = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=_lowerCAmelCase , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=_lowerCAmelCase , ) if num_warmup_steps: A : Dict = WarmUp( initial_learning_rate=_lowerCAmelCase , decay_schedule_fn=_lowerCAmelCase , warmup_steps=_lowerCAmelCase , ) if weight_decay_rate > 0.0: A : str = AdamWeightDecay( learning_rate=_lowerCAmelCase , weight_decay_rate=_lowerCAmelCase , beta_a=_lowerCAmelCase , beta_a=_lowerCAmelCase , epsilon=_lowerCAmelCase , clipnorm=_lowerCAmelCase , global_clipnorm=_lowerCAmelCase , exclude_from_weight_decay=["""LayerNorm""", """layer_norm""", """bias"""] , include_in_weight_decay=_lowerCAmelCase , ) else: A : Optional[int] = tf.keras.optimizers.Adam( learning_rate=_lowerCAmelCase , beta_a=_lowerCAmelCase , beta_a=_lowerCAmelCase , epsilon=_lowerCAmelCase , clipnorm=_lowerCAmelCase , global_clipnorm=_lowerCAmelCase , ) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. return optimizer, lr_schedule class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__( self, lowerCamelCase__ = 0.001, lowerCamelCase__ = 0.9, lowerCamelCase__ = 0.999, lowerCamelCase__ = 1e-7, lowerCamelCase__ = False, lowerCamelCase__ = 0.0, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = "AdamWeightDecay", **lowerCamelCase__, ): super().__init__(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__ ) A : int = weight_decay_rate A : Any = include_in_weight_decay A : Dict = exclude_from_weight_decay @classmethod def _lowerCAmelCase ( cls, lowerCamelCase__ ): A : Tuple = {"""WarmUp""": WarmUp} return super(lowerCamelCase__, cls ).from_config(lowerCamelCase__, custom_objects=lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ): super(lowerCamelCase__, self )._prepare_local(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ) A : List[str] = tf.constant( self.weight_decay_rate, name="""adam_weight_decay_rate""" ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ): A : Any = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["""weight_decay_rate"""], use_locking=self._use_locking, ) return tf.no_op() def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__=None, **lowerCamelCase__ ): A , A : Dict = list(zip(*lowerCamelCase__ ) ) return super(lowerCamelCase__, self ).apply_gradients(zip(lowerCamelCase__, lowerCamelCase__ ), name=lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ): if apply_state is None: return self._decayed_lr_t[var_dtype], {} A : Union[str, Any] = apply_state or {} A : Optional[int] = apply_state.get((var_device, var_dtype) ) if coefficients is None: A : Dict = self._fallback_apply_state(lowerCamelCase__, lowerCamelCase__ ) A : List[Any] = coefficients return coefficients["lr_t"], {"apply_state": apply_state} def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__=None ): A , A : str = self._get_lr(var.device, var.dtype.base_dtype, lowerCamelCase__ ) A : Any = self._decay_weights_op(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ) with tf.control_dependencies([decay] ): return super(lowerCamelCase__, self )._resource_apply_dense(lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__=None ): A , A : Tuple = self._get_lr(var.device, var.dtype.base_dtype, lowerCamelCase__ ) A : Optional[Any] = self._decay_weights_op(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ) with tf.control_dependencies([decay] ): return super(lowerCamelCase__, self )._resource_apply_sparse(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self ): A : Dict = super().get_config() config.update({"""weight_decay_rate""": self.weight_decay_rate} ) return config def _lowerCAmelCase ( self, lowerCamelCase__ ): if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(lowerCamelCase__, lowerCamelCase__ ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(lowerCamelCase__, lowerCamelCase__ ) is not None: return False return True class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__( self ): A : List[str] = [] A : List[str] = None @property def _lowerCAmelCase ( self ): if self._accum_steps is None: A : str = tf.Variable( tf.constant(0, dtype=tf.intaa ), trainable=lowerCamelCase__, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, ) return self._accum_steps.value() @property def _lowerCAmelCase ( self ): if not self._gradients: raise ValueError("""The accumulator should be called first to initialize the gradients""" ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__( self, lowerCamelCase__ ): if not self._gradients: A : int = self.step # Create the step variable. self._gradients.extend( [ tf.Variable( tf.zeros_like(lowerCamelCase__ ), trainable=lowerCamelCase__, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, ) if gradient is not None else gradient for gradient in gradients ] ) if len(lowerCamelCase__ ) != len(self._gradients ): raise ValueError(f'''Expected {len(self._gradients )} gradients, but got {len(lowerCamelCase__ )}''' ) for accum_gradient, gradient in zip(self._gradients, lowerCamelCase__ ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(lowerCamelCase__ ) self._accum_steps.assign_add(1 ) def _lowerCAmelCase ( self ): if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(lowerCamelCase__ ) )
116
1
"""simple docstring""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging __A = logging.get_logger(__name__) __A = { "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json", "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json", "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json", "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json", "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json", "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json", "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json", "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json", "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json", "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json", "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json", "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json", } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :List[str] = "codegen" _UpperCAmelCase :Optional[int] = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , _UpperCAmelCase=50400 , _UpperCAmelCase=2048 , _UpperCAmelCase=2048 , _UpperCAmelCase=4096 , _UpperCAmelCase=28 , _UpperCAmelCase=16 , _UpperCAmelCase=64 , _UpperCAmelCase=None , _UpperCAmelCase="gelu_new" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=50256 , _UpperCAmelCase=50256 , _UpperCAmelCase=False , **_UpperCAmelCase , ): lowercase__: int = vocab_size lowercase__: str = n_ctx lowercase__: List[Any] = n_positions lowercase__: Union[str, Any] = n_embd lowercase__: Optional[Any] = n_layer lowercase__: str = n_head lowercase__: List[Any] = n_inner lowercase__: Union[str, Any] = rotary_dim lowercase__: Optional[Any] = activation_function lowercase__: Union[str, Any] = resid_pdrop lowercase__: Optional[int] = embd_pdrop lowercase__: Optional[Any] = attn_pdrop lowercase__: Optional[int] = layer_norm_epsilon lowercase__: List[Any] = initializer_range lowercase__: Tuple = use_cache lowercase__: Any = bos_token_id lowercase__: Any = eos_token_id super().__init__( bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , **_UpperCAmelCase ) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase = "default" , _UpperCAmelCase = None , _UpperCAmelCase = False , ): super().__init__(_UpperCAmelCase , task=_UpperCAmelCase , patching_specs=_UpperCAmelCase , use_past=_UpperCAmelCase ) if not getattr(self._config , '''pad_token_id''' , _UpperCAmelCase ): # TODO: how to do that better? lowercase__: Any = 0 @property def _snake_case ( self ): lowercase__: int = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(_UpperCAmelCase , direction='''inputs''' ) lowercase__: int = {0: '''batch''', 1: '''past_sequence + sequence'''} else: lowercase__: Tuple = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def _snake_case ( self ): return self._config.n_layer @property def _snake_case ( self ): return self._config.n_head def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , ): lowercase__: Optional[int] = super(_UpperCAmelCase , self ).generate_dummy_inputs( _UpperCAmelCase , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase ) # We need to order the input in the way they appears in the forward() lowercase__: List[Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowercase__: Union[str, Any] = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values lowercase__: Any = seqlen + 2 lowercase__: List[str] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowercase__: Optional[Any] = [ (torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase )) for _ in range(self.num_layers ) ] lowercase__: Optional[Any] = common_inputs['''attention_mask'''] if self.use_past: lowercase__: List[str] = ordered_inputs['''attention_mask'''].dtype lowercase__: List[Any] = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(_UpperCAmelCase , _UpperCAmelCase , dtype=_UpperCAmelCase )] , dim=1 ) return ordered_inputs @property def _snake_case ( self ): return 13
371
"""simple docstring""" import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( "--original_config_file", default=None, type=str, help="The YAML config file corresponding to the original architecture.", ) parser.add_argument( "--num_in_channels", default=None, type=int, help="The number of input channels. If `None` number of input channels will be automatically inferred.", ) parser.add_argument( "--scheduler_type", default="pndm", type=str, help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']", ) parser.add_argument( "--pipeline_type", default=None, type=str, help=( "The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'" ". If `None` pipeline will be automatically inferred." ), ) parser.add_argument( "--image_size", default=None, type=int, help=( "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2" " Base. Use 768 for Stable Diffusion v2." ), ) parser.add_argument( "--prediction_type", default=None, type=str, help=( "The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable" " Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2." ), ) parser.add_argument( "--extract_ema", action="store_true", help=( "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights" " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield" " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning." ), ) parser.add_argument( "--upcast_attention", action="store_true", help=( "Whether the attention computation should always be upcasted. This is necessary when running stable" " diffusion 2.1." ), ) parser.add_argument( "--from_safetensors", action="store_true", help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.", ) parser.add_argument( "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)") parser.add_argument( "--stable_unclip", type=str, default=None, required=False, help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.", ) parser.add_argument( "--stable_unclip_prior", type=str, default=None, required=False, help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.", ) parser.add_argument( "--clip_stats_path", type=str, help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.", required=False, ) parser.add_argument( "--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint." ) parser.add_argument("--half", action="store_true", help="Save weights in half precision.") parser.add_argument( "--vae_path", type=str, default=None, required=False, help="Set to a path, hub id to an already converted vae to not convert it again.", ) __A = parser.parse_args() __A = download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
2
0
'''simple docstring''' import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer UpperCamelCase__ : Any = logging.get_logger(__name__) UpperCamelCase__ : Union[str, Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} UpperCamelCase__ : Optional[Any] = { 'vocab_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json', }, 'merges_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt', }, 'tokenizer_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json', }, } UpperCamelCase__ : Tuple = { 'allenai/led-base-16384': 16_384, } class _lowerCAmelCase ( __A ): """simple docstring""" lowerCamelCase = VOCAB_FILES_NAMES lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase = LEDTokenizer lowerCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="replace" , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase=False , _lowerCamelCase=True , **_lowerCamelCase , ) -> Dict: super().__init__( _lowerCamelCase , _lowerCamelCase , tokenizer_file=_lowerCamelCase , errors=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase , **_lowerCamelCase , ) A_ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , _lowerCamelCase ) != add_prefix_space: A_ : Dict = getattr(_lowerCamelCase , pre_tok_state.pop("""type""" ) ) A_ : Tuple = add_prefix_space A_ : Optional[Any] = pre_tok_class(**_lowerCamelCase ) A_ : Optional[Any] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` A_ : Any = """post_processor""" A_ : Optional[Any] = getattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase ) if tokenizer_component_instance: A_ : int = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: A_ : Union[str, Any] = tuple(state["""sep"""] ) if "cls" in state: A_ : List[str] = tuple(state["""cls"""] ) A_ : str = False if state.get("""add_prefix_space""" , _lowerCamelCase ) != add_prefix_space: A_ : Tuple = add_prefix_space A_ : Dict = True if state.get("""trim_offsets""" , _lowerCamelCase ) != trim_offsets: A_ : List[str] = trim_offsets A_ : Any = True if changes_to_apply: A_ : Optional[int] = getattr(_lowerCamelCase , state.pop("""type""" ) ) A_ : str = component_class(**_lowerCamelCase ) setattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def UpperCAmelCase_ ( self ) -> str: if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return None return str(self._mask_token ) @mask_token.setter def UpperCAmelCase_ ( self , _lowerCamelCase ) -> Dict: A_ : Union[str, Any] = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else value A_ : Optional[Any] = value def UpperCAmelCase_ ( self , *_lowerCamelCase , **_lowerCamelCase ) -> BatchEncoding: A_ : List[Any] = kwargs.get("""is_split_into_words""" , _lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " """to use it with pretokenized inputs.""" ) return super()._batch_encode_plus(*_lowerCamelCase , **_lowerCamelCase ) def UpperCAmelCase_ ( self , *_lowerCamelCase , **_lowerCamelCase ) -> BatchEncoding: A_ : List[str] = kwargs.get("""is_split_into_words""" , _lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " """to use it with pretokenized inputs.""" ) return super()._encode_plus(*_lowerCamelCase , **_lowerCamelCase ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ) -> Tuple[str]: A_ : Any = self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase ) return tuple(_lowerCamelCase ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase=None ) -> str: A_ : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ) -> List[int]: A_ : Optional[int] = [self.sep_token_id] A_ : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = PaddingStrategy.DO_NOT_PAD , _lowerCamelCase = None , _lowerCamelCase = None , ) -> dict: A_ : List[Any] = super()._pad( encoded_inputs=_lowerCamelCase , max_length=_lowerCamelCase , padding_strategy=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: A_ : Tuple = """attention_mask""" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: A_ : Dict = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. A_ : List[Any] = len(encoded_inputs["""global_attention_mask"""] ) != len(_lowerCamelCase ) if needs_to_be_padded: A_ : str = len(_lowerCamelCase ) - len(encoded_inputs["""global_attention_mask"""] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` A_ : Dict = ( encoded_inputs["""global_attention_mask"""] + [-1] * difference ) elif self.padding_side == "left": A_ : Dict = [-1] * difference + encoded_inputs[ """global_attention_mask""" ] else: raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) ) return encoded_inputs
344
'''simple docstring''' import copy import json import os import tempfile from transformers import is_torch_available from .test_configuration_utils import config_common_kwargs class _lowerCAmelCase ( __A ): """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=None , **_lowerCamelCase ) -> Any: A_ : List[Any] = parent A_ : int = config_class A_ : int = has_text_modality A_ : str = kwargs A_ : int = common_properties def UpperCAmelCase_ ( self ) -> str: A_ : Optional[int] = self.config_class(**self.inputs_dict ) A_ : Optional[int] = ( ["""hidden_size""", """num_attention_heads""", """num_hidden_layers"""] if self.common_properties is None else self.common_properties ) # Add common fields for text models if self.has_text_modality: common_properties.extend(["""vocab_size"""] ) # Test that config has the common properties as getters for prop in common_properties: self.parent.assertTrue(hasattr(_lowerCamelCase , _lowerCamelCase ) , msg=F"`{prop}` does not exist" ) # Test that config has the common properties as setter for idx, name in enumerate(_lowerCamelCase ): try: setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) self.parent.assertEqual( getattr(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , msg=F"`{name} value {idx} expected, but was {getattr(_lowerCamelCase , _lowerCamelCase )}" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass # Test if config class can be called with Config(prop_name=..) for idx, name in enumerate(_lowerCamelCase ): try: A_ : List[str] = self.config_class(**{name: idx} ) self.parent.assertEqual( getattr(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , msg=F"`{name} value {idx} expected, but was {getattr(_lowerCamelCase , _lowerCamelCase )}" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass def UpperCAmelCase_ ( self ) -> Tuple: A_ : Any = self.config_class(**self.inputs_dict ) A_ : Optional[int] = json.loads(config.to_json_string() ) for key, value in self.inputs_dict.items(): self.parent.assertEqual(obj[key] , _lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Tuple: A_ : str = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: A_ : List[Any] = os.path.join(_lowerCamelCase , """config.json""" ) config_first.to_json_file(_lowerCamelCase ) A_ : Dict = self.config_class.from_json_file(_lowerCamelCase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def UpperCAmelCase_ ( self ) -> List[str]: A_ : Any = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: config_first.save_pretrained(_lowerCamelCase ) A_ : Union[str, Any] = self.config_class.from_pretrained(_lowerCamelCase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def UpperCAmelCase_ ( self ) -> Optional[Any]: A_ : Optional[int] = self.config_class(**self.inputs_dict ) A_ : List[Any] = """test""" with tempfile.TemporaryDirectory() as tmpdirname: A_ : Any = os.path.join(_lowerCamelCase , _lowerCamelCase ) config_first.save_pretrained(_lowerCamelCase ) A_ : Any = self.config_class.from_pretrained(_lowerCamelCase , subfolder=_lowerCamelCase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: A_ : Tuple = self.config_class(**self.inputs_dict , num_labels=5 ) self.parent.assertEqual(len(config.idalabel ) , 5 ) self.parent.assertEqual(len(config.labelaid ) , 5 ) A_ : str = 3 self.parent.assertEqual(len(config.idalabel ) , 3 ) self.parent.assertEqual(len(config.labelaid ) , 3 ) def UpperCAmelCase_ ( self ) -> Optional[Any]: if self.config_class.is_composition: return A_ : Dict = self.config_class() self.parent.assertIsNotNone(_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Dict: A_ : Any = copy.deepcopy(_lowerCamelCase ) A_ : Tuple = self.config_class(**_lowerCamelCase ) A_ : Optional[Any] = [] for key, value in config_common_kwargs.items(): if key == "torch_dtype": if not is_torch_available(): continue else: import torch if config.torch_dtype != torch.floataa: wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.floataa) ) elif getattr(_lowerCamelCase , _lowerCamelCase ) != value: wrong_values.append((key, getattr(_lowerCamelCase , _lowerCamelCase ), value) ) if len(_lowerCamelCase ) > 0: A_ : List[Any] = """\n""".join([F"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] ) raise ValueError(F"The following keys were not properly set in the config:\n{errors}" ) def UpperCAmelCase_ ( self ) -> Optional[int]: self.create_and_test_config_common_properties() self.create_and_test_config_to_json_string() self.create_and_test_config_to_json_file() self.create_and_test_config_from_and_save_pretrained() self.create_and_test_config_from_and_save_pretrained_subfolder() self.create_and_test_config_with_num_labels() self.check_config_can_be_init_without_params() self.check_config_arguments_init()
344
1
'''simple docstring''' import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class UpperCAmelCase_ ( __lowercase ): lowerCamelCase : int = (CMStochasticIterativeScheduler,) lowerCamelCase : Optional[int] = 10 def __UpperCAmelCase ( self : int , **UpperCAmelCase__ : Any ) -> Optional[int]: lowerCAmelCase = { 'num_train_timesteps': 2_0_1, 'sigma_min': 0.002, 'sigma_max': 80.0, } config.update(**UpperCAmelCase__ ) return config def __UpperCAmelCase ( self : Dict ) -> Optional[Any]: lowerCAmelCase = 1_0 lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = self.scheduler_classes[0](**UpperCAmelCase__ ) scheduler.set_timesteps(UpperCAmelCase__ ) lowerCAmelCase = scheduler.timesteps[0] lowerCAmelCase = scheduler.timesteps[1] lowerCAmelCase = self.dummy_sample lowerCAmelCase = 0.1 * sample lowerCAmelCase = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample lowerCAmelCase = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def __UpperCAmelCase ( self : Tuple ) -> Tuple: for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=UpperCAmelCase__ ) def __UpperCAmelCase ( self : str ) -> Any: for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=UpperCAmelCase__ ) def __UpperCAmelCase ( self : Tuple ) -> Any: lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**UpperCAmelCase__ ) lowerCAmelCase = 1 scheduler.set_timesteps(UpperCAmelCase__ ) lowerCAmelCase = scheduler.timesteps lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = self.dummy_model() lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(UpperCAmelCase__ ): # 1. scale model input lowerCAmelCase = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ ) # 2. predict noise residual lowerCAmelCase = model(UpperCAmelCase__ , UpperCAmelCase__ ) # 3. predict previous sample x_t-1 lowerCAmelCase = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ ).prev_sample lowerCAmelCase = pred_prev_sample lowerCAmelCase = torch.sum(torch.abs(UpperCAmelCase__ ) ) lowerCAmelCase = torch.mean(torch.abs(UpperCAmelCase__ ) ) assert abs(result_sum.item() - 192.7_614 ) < 1E-2 assert abs(result_mean.item() - 0.2_510 ) < 1E-3 def __UpperCAmelCase ( self : List[Any] ) -> str: lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**UpperCAmelCase__ ) lowerCAmelCase = [1_0_6, 0] scheduler.set_timesteps(timesteps=UpperCAmelCase__ ) lowerCAmelCase = scheduler.timesteps lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = self.dummy_model() lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input lowerCAmelCase = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ ) # 2. predict noise residual lowerCAmelCase = model(UpperCAmelCase__ , UpperCAmelCase__ ) # 3. predict previous sample x_t-1 lowerCAmelCase = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ ).prev_sample lowerCAmelCase = pred_prev_sample lowerCAmelCase = torch.sum(torch.abs(UpperCAmelCase__ ) ) lowerCAmelCase = torch.mean(torch.abs(UpperCAmelCase__ ) ) assert abs(result_sum.item() - 347.6_357 ) < 1E-2 assert abs(result_mean.item() - 0.4_527 ) < 1E-3 def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]: lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**UpperCAmelCase__ ) lowerCAmelCase = [3_9, 3_0, 1_2, 1_5, 0] with self.assertRaises(UpperCAmelCase__ , msg='`timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=UpperCAmelCase__ ) def __UpperCAmelCase ( self : int ) -> Optional[Any]: lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**UpperCAmelCase__ ) lowerCAmelCase = [3_9, 3_0, 1_2, 1, 0] lowerCAmelCase = len(UpperCAmelCase__ ) with self.assertRaises(UpperCAmelCase__ , msg='Can only pass one of `num_inference_steps` or `timesteps`.' ): scheduler.set_timesteps(num_inference_steps=UpperCAmelCase__ , timesteps=UpperCAmelCase__ ) def __UpperCAmelCase ( self : List[str] ) -> int: lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**UpperCAmelCase__ ) lowerCAmelCase = [scheduler.config.num_train_timesteps] with self.assertRaises( UpperCAmelCase__ , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ): scheduler.set_timesteps(timesteps=UpperCAmelCase__ )
55
'''simple docstring''' from __future__ import annotations def a_ ( lowerCamelCase : list[float] , lowerCamelCase : list[float] ): lowerCAmelCase = sorted(numsa + numsa ) lowerCAmelCase , lowerCAmelCase = divmod(len(lowerCamelCase ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() __snake_case =[float(x) for x in input("""Enter the elements of first array: """).split()] __snake_case =[float(x) for x in input("""Enter the elements of second array: """).split()] print(F'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
55
1
from __future__ import annotations def UpperCamelCase ( _A, _A, _A ): """simple docstring""" __magic_name__ : Union[str, Any] = list(range(len(_A ) ) ) __magic_name__ : Dict = [v / w for v, w in zip(_A, _A )] index.sort(key=lambda _A : ratio[i], reverse=_A ) __magic_name__ : float = 0 __magic_name__ : list[float] = [0] * len(_A ) for i in index: if weight[i] <= capacity: __magic_name__ : Optional[int] = 1 max_value += value[i] capacity -= weight[i] else: __magic_name__ : int = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
342
import re def UpperCamelCase ( _A ): """simple docstring""" __magic_name__ : List[Any] = re.compile(R"""^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$""" ) if match := re.search(_A, _A ): return match.string == phone return False if __name__ == "__main__": print(indian_phone_validator("+918827897895"))
342
1
'''simple docstring''' from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_herbert import HerbertTokenizer __A : Optional[Any] = logging.get_logger(__name__) __A : Dict = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} __A : Optional[int] = { "vocab_file": { "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json" }, "merges_file": { "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt" }, } __A : Optional[Any] = {"allegro/herbert-base-cased": 514} __A : Tuple = {} class __snake_case ( _SCREAMING_SNAKE_CASE): """simple docstring""" lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_INIT_CONFIGURATION lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = HerbertTokenizer def __init__( self : str , lowerCamelCase : Tuple=None , lowerCamelCase : List[str]=None , lowerCamelCase : List[Any]=None , lowerCamelCase : Optional[Any]="<s>" , lowerCamelCase : Optional[int]="<unk>" , lowerCamelCase : List[Any]="<pad>" , lowerCamelCase : Dict="<mask>" , lowerCamelCase : Optional[int]="</s>" , **lowerCamelCase : List[str] , ) -> Optional[Any]: super().__init__( lowerCamelCase , lowerCamelCase , tokenizer_file=lowerCamelCase , cls_token=lowerCamelCase , unk_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , sep_token=lowerCamelCase , **lowerCamelCase , ) def __lowercase ( self : List[str] , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ) -> List[int]: lowerCAmelCase_ : Union[str, Any] = [self.cls_token_id] lowerCAmelCase_ : Dict = [self.sep_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __lowercase ( self : Optional[Any] , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None , lowerCamelCase : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase )) + [1] return [1] + ([0] * len(lowerCamelCase )) + [1] + ([0] * len(lowerCamelCase )) + [1] def __lowercase ( self : Optional[Any] , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ) -> List[int]: lowerCAmelCase_ : Tuple = [self.sep_token_id] lowerCAmelCase_ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowercase ( self : List[Any] , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ) -> Tuple[str]: lowerCAmelCase_ : Optional[Any] = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase ) return tuple(lowerCamelCase )
367
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_lxmert import LxmertTokenizer __A : Any = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __A : int = { "vocab_file": { "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt", }, "tokenizer_file": { "unc-nlp/lxmert-base-uncased": ( "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json" ), }, } __A : List[Any] = { "unc-nlp/lxmert-base-uncased": 512, } __A : Optional[Any] = { "unc-nlp/lxmert-base-uncased": {"do_lower_case": True}, } class __snake_case ( _SCREAMING_SNAKE_CASE): """simple docstring""" lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_INIT_CONFIGURATION lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = LxmertTokenizer def __init__( self : Dict , lowerCamelCase : Tuple=None , lowerCamelCase : List[str]=None , lowerCamelCase : Dict=True , lowerCamelCase : List[Any]="[UNK]" , lowerCamelCase : Tuple="[SEP]" , lowerCamelCase : str="[PAD]" , lowerCamelCase : Any="[CLS]" , lowerCamelCase : str="[MASK]" , lowerCamelCase : Dict=True , lowerCamelCase : Union[str, Any]=None , **lowerCamelCase : Optional[int] , ) -> Optional[Any]: super().__init__( lowerCamelCase , tokenizer_file=lowerCamelCase , do_lower_case=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , pad_token=lowerCamelCase , cls_token=lowerCamelCase , mask_token=lowerCamelCase , tokenize_chinese_chars=lowerCamelCase , strip_accents=lowerCamelCase , **lowerCamelCase , ) lowerCAmelCase_ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" , lowerCamelCase ) != do_lower_case or normalizer_state.get("""strip_accents""" , lowerCamelCase ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" , lowerCamelCase ) != tokenize_chinese_chars ): lowerCAmelCase_ : Tuple = getattr(lowerCamelCase , normalizer_state.pop("""type""" ) ) lowerCAmelCase_ : Optional[Any] = do_lower_case lowerCAmelCase_ : Dict = strip_accents lowerCAmelCase_ : List[Any] = tokenize_chinese_chars lowerCAmelCase_ : List[Any] = normalizer_class(**lowerCamelCase ) lowerCAmelCase_ : Tuple = do_lower_case def __lowercase ( self : Tuple , lowerCamelCase : str , lowerCamelCase : Optional[Any]=None ) -> int: lowerCAmelCase_ : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __lowercase ( self : str , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ) -> List[int]: lowerCAmelCase_ : str = [self.sep_token_id] lowerCAmelCase_ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowercase ( self : Tuple , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ) -> Tuple[str]: lowerCAmelCase_ : int = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase ) return tuple(lowerCamelCase )
89
0
"""simple docstring""" from math import sqrt def a_ ( _lowerCAmelCase : int ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(sqrt(_lowerCAmelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def a_ ( _lowerCAmelCase : int = 1_0001 ): '''simple docstring''' lowercase__ : Any = 0 lowercase__ : int = 1 while count != nth and number < 3: number += 1 if is_prime(_lowerCAmelCase ): count += 1 while count != nth: number += 2 if is_prime(_lowerCAmelCase ): count += 1 return number if __name__ == "__main__": print(f'''{solution() = }''')
77
'''simple docstring''' from __future__ import annotations def a__ ( lowercase : str, lowercase : list[str] | None = None, lowercase : dict[str, float] | None = None, lowercase : bool = False, ) -> tuple[int, float, str]: """simple docstring""" _UpperCamelCase = cipher_alphabet or [chr(lowercase ) for i in range(97, 123 )] # If the argument is None or the user provided an empty dictionary if not frequencies_dict: # Frequencies of letters in the english language (how much they show up) _UpperCamelCase = { '''a''': 0.0_8_4_9_7, '''b''': 0.0_1_4_9_2, '''c''': 0.0_2_2_0_2, '''d''': 0.0_4_2_5_3, '''e''': 0.1_1_1_6_2, '''f''': 0.0_2_2_2_8, '''g''': 0.0_2_0_1_5, '''h''': 0.0_6_0_9_4, '''i''': 0.0_7_5_4_6, '''j''': 0.0_0_1_5_3, '''k''': 0.0_1_2_9_2, '''l''': 0.0_4_0_2_5, '''m''': 0.0_2_4_0_6, '''n''': 0.0_6_7_4_9, '''o''': 0.0_7_5_0_7, '''p''': 0.0_1_9_2_9, '''q''': 0.0_0_0_9_5, '''r''': 0.0_7_5_8_7, '''s''': 0.0_6_3_2_7, '''t''': 0.0_9_3_5_6, '''u''': 0.0_2_7_5_8, '''v''': 0.0_0_9_7_8, '''w''': 0.0_2_5_6_0, '''x''': 0.0_0_1_5_0, '''y''': 0.0_1_9_9_4, '''z''': 0.0_0_0_7_7, } else: # Custom frequencies dictionary _UpperCamelCase = frequencies_dict if not case_sensitive: _UpperCamelCase = ciphertext.lower() # Chi squared statistic values _UpperCamelCase = {} # cycle through all of the shifts for shift in range(len(lowercase ) ): _UpperCamelCase = '''''' # decrypt the message with the shift for letter in ciphertext: try: # Try to index the letter in the alphabet _UpperCamelCase = (alphabet_letters.index(letter.lower() ) - shift) % len( lowercase ) decrypted_with_shift += ( alphabet_letters[new_key].upper() if case_sensitive and letter.isupper() else alphabet_letters[new_key] ) except ValueError: # Append the character if it isn't in the alphabet decrypted_with_shift += letter _UpperCamelCase = 0.0 # Loop through each letter in the decoded message with the shift for letter in decrypted_with_shift: if case_sensitive: _UpperCamelCase = letter.lower() if letter in frequencies: # Get the amount of times the letter occurs in the message _UpperCamelCase = decrypted_with_shift.lower().count(lowercase ) # Get the excepcted amount of times the letter should appear based # on letter frequencies _UpperCamelCase = frequencies[letter] * occurrences # Complete the chi squared statistic formula _UpperCamelCase = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value else: if letter.lower() in frequencies: # Get the amount of times the letter occurs in the message _UpperCamelCase = decrypted_with_shift.count(lowercase ) # Get the excepcted amount of times the letter should appear based # on letter frequencies _UpperCamelCase = frequencies[letter] * occurrences # Complete the chi squared statistic formula _UpperCamelCase = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary _UpperCamelCase = ( chi_squared_statistic, decrypted_with_shift, ) # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic def chi_squared_statistic_values_sorting_key(lowercase : int ) -> tuple[float, str]: return chi_squared_statistic_values[key] _UpperCamelCase = min( lowercase, key=lowercase, ) # Get all the data from the most likely cipher (key, decoded message) ( ( _UpperCamelCase ) , ( _UpperCamelCase ) , ) = chi_squared_statistic_values[most_likely_cipher] # Return the data on the most likely shift return ( most_likely_cipher, most_likely_cipher_chi_squared_value, decoded_most_likely_cipher, )
324
0
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ )-> list: """simple docstring""" if len(SCREAMING_SNAKE_CASE_ ) <= 1: return lst UpperCamelCase_ = 1 while i < len(SCREAMING_SNAKE_CASE_ ): if lst[i - 1] <= lst[i]: i += 1 else: UpperCamelCase_ , UpperCamelCase_ = lst[i], lst[i - 1] i -= 1 if i == 0: UpperCamelCase_ = 1 return lst if __name__ == "__main__": SCREAMING_SNAKE_CASE :Union[str, Any] = input("""Enter numbers separated by a comma:\n""").strip() SCREAMING_SNAKE_CASE :Optional[int] = [int(item) for item in user_input.split(""",""")] print(gnome_sort(unsorted))
60
import inspect from typing import Callable, List, Optional, Union import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, WhisperForConditionalGeneration, WhisperProcessor, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging SCREAMING_SNAKE_CASE :List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name class __magic_name__ ( snake_case ): def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , )-> Optional[Any]: super().__init__() if safety_checker is None: logger.warning( F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) self.register_modules( speech_model=_lowercase , speech_processor=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , unet=_lowercase , scheduler=_lowercase , feature_extractor=_lowercase , ) def UpperCAmelCase_ ( self , _lowercase = "auto" )-> str: if slice_size == "auto": UpperCamelCase_ = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(_lowercase ) def UpperCAmelCase_ ( self )-> Optional[int]: self.enable_attention_slicing(_lowercase ) @torch.no_grad() def __call__( self , _lowercase , _lowercase=16_000 , _lowercase = 512 , _lowercase = 512 , _lowercase = 50 , _lowercase = 7.5 , _lowercase = None , _lowercase = 1 , _lowercase = 0.0 , _lowercase = None , _lowercase = None , _lowercase = "pil" , _lowercase = True , _lowercase = None , _lowercase = 1 , **_lowercase , )-> str: UpperCamelCase_ = self.speech_processor.feature_extractor( _lowercase , return_tensors="pt" , sampling_rate=_lowercase ).input_features.to(self.device ) UpperCamelCase_ = self.speech_model.generate(_lowercase , max_length=480_000 ) UpperCamelCase_ = self.speech_processor.tokenizer.batch_decode(_lowercase , skip_special_tokens=_lowercase , normalize=_lowercase )[ 0 ] if isinstance(_lowercase , _lowercase ): UpperCamelCase_ = 1 elif isinstance(_lowercase , _lowercase ): UpperCamelCase_ = len(_lowercase ) else: raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(_lowercase )}" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(_lowercase , _lowercase ) or callback_steps <= 0) ): raise ValueError( F"`callback_steps` has to be a positive integer but is {callback_steps} of type" F" {type(_lowercase )}." ) # get prompt text embeddings UpperCamelCase_ = self.tokenizer( _lowercase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , ) UpperCamelCase_ = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: UpperCamelCase_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" F" {self.tokenizer.model_max_length} tokens: {removed_text}" ) UpperCamelCase_ = text_input_ids[:, : self.tokenizer.model_max_length] UpperCamelCase_ = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = text_embeddings.shape UpperCamelCase_ = text_embeddings.repeat(1 , _lowercase , 1 ) UpperCamelCase_ = text_embeddings.view(bs_embed * num_images_per_prompt , _lowercase , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. UpperCamelCase_ = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: UpperCamelCase_ = 42 if negative_prompt is None: UpperCamelCase_ = [""] * batch_size elif type(_lowercase ) is not type(_lowercase ): raise TypeError( F"`negative_prompt` should be the same type to `prompt`, but got {type(_lowercase )} !=" F" {type(_lowercase )}." ) elif isinstance(_lowercase , _lowercase ): UpperCamelCase_ = [negative_prompt] elif batch_size != len(_lowercase ): raise ValueError( F"`negative_prompt`: {negative_prompt} has batch size {len(_lowercase )}, but `prompt`:" F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: UpperCamelCase_ = negative_prompt UpperCamelCase_ = text_input_ids.shape[-1] UpperCamelCase_ = self.tokenizer( _lowercase , padding="max_length" , max_length=_lowercase , truncation=_lowercase , return_tensors="pt" , ) UpperCamelCase_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCamelCase_ = uncond_embeddings.shape[1] UpperCamelCase_ = uncond_embeddings.repeat(1 , _lowercase , 1 ) UpperCamelCase_ = uncond_embeddings.view(batch_size * num_images_per_prompt , _lowercase , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCamelCase_ = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. UpperCamelCase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) UpperCamelCase_ = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps UpperCamelCase_ = torch.randn(_lowercase , generator=_lowercase , device="cpu" , dtype=_lowercase ).to( self.device ) else: UpperCamelCase_ = torch.randn(_lowercase , generator=_lowercase , device=self.device , dtype=_lowercase ) else: if latents.shape != latents_shape: raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" ) UpperCamelCase_ = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(_lowercase ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand UpperCamelCase_ = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler UpperCamelCase_ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] UpperCamelCase_ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) UpperCamelCase_ = {} if accepts_eta: UpperCamelCase_ = eta for i, t in enumerate(self.progress_bar(_lowercase ) ): # expand the latents if we are doing classifier free guidance UpperCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCamelCase_ = self.scheduler.scale_model_input(_lowercase , _lowercase ) # predict the noise residual UpperCamelCase_ = self.unet(_lowercase , _lowercase , encoder_hidden_states=_lowercase ).sample # perform guidance if do_classifier_free_guidance: UpperCamelCase_ , UpperCamelCase_ = noise_pred.chunk(2 ) UpperCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 UpperCamelCase_ = self.scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(_lowercase , _lowercase , _lowercase ) UpperCamelCase_ = 1 / 0.18_215 * latents UpperCamelCase_ = self.vae.decode(_lowercase ).sample UpperCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 UpperCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": UpperCamelCase_ = self.numpy_to_pil(_lowercase ) if not return_dict: return image return StableDiffusionPipelineOutput(images=_lowercase , nsfw_content_detected=_lowercase )
60
1
'''simple docstring''' import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class lowerCAmelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Optional[Any] ) -> Optional[int]: """simple docstring""" super().__init__() __SCREAMING_SNAKE_CASE = nn.Linear(3 , 4 ) __SCREAMING_SNAKE_CASE = nn.BatchNormad(4 ) __SCREAMING_SNAKE_CASE = nn.Linear(4 , 5 ) def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Dict: """simple docstring""" return self.lineara(self.batchnorm(self.lineara(__SCREAMING_SNAKE_CASE ) ) ) class lowerCAmelCase__ ( a ): """simple docstring""" def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Dict , *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[Any]: """simple docstring""" return (args[0] + 1,) + args[1:], kwargs class lowerCAmelCase__ ( a ): """simple docstring""" def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return output + 1 class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : str ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = ModelForTest() __SCREAMING_SNAKE_CASE = ModelHook() add_hook_to_module(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertEqual(test_model._hf_hook , __SCREAMING_SNAKE_CASE ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """_old_forward""" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , """forward""" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] ) remove_hook_from_module(__SCREAMING_SNAKE_CASE ) self.assertFalse(hasattr(__SCREAMING_SNAKE_CASE , """_hf_hook""" ) ) self.assertFalse(hasattr(__SCREAMING_SNAKE_CASE , """_old_forward""" ) ) def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = ModelForTest() __SCREAMING_SNAKE_CASE = ModelHook() add_hook_to_module(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) add_hook_to_module(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , append=__SCREAMING_SNAKE_CASE ) self.assertEqual(isinstance(test_model._hf_hook , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """_old_forward""" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , """forward""" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] ) remove_hook_from_module(__SCREAMING_SNAKE_CASE ) self.assertFalse(hasattr(__SCREAMING_SNAKE_CASE , """_hf_hook""" ) ) self.assertFalse(hasattr(__SCREAMING_SNAKE_CASE , """_old_forward""" ) ) def UpperCAmelCase__ ( self : int ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = ModelForTest() __SCREAMING_SNAKE_CASE = torch.randn(2 , 3 ) __SCREAMING_SNAKE_CASE = test_model(x + 1 ) __SCREAMING_SNAKE_CASE = test_model(x + 2 ) __SCREAMING_SNAKE_CASE = PreForwardHook() add_hook_to_module(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = test_model(__SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain __SCREAMING_SNAKE_CASE = PreForwardHook() add_hook_to_module(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = test_model(__SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks __SCREAMING_SNAKE_CASE = SequentialHook(PreForwardHook() , PreForwardHook() ) add_hook_to_module(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = test_model(__SCREAMING_SNAKE_CASE ) assert torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-5 ) def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = ModelForTest() __SCREAMING_SNAKE_CASE = torch.randn(2 , 3 ) __SCREAMING_SNAKE_CASE = test_model(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = PostForwardHook() add_hook_to_module(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = test_model(__SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , output + 1 , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain __SCREAMING_SNAKE_CASE = PostForwardHook() add_hook_to_module(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = test_model(__SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , output + 1 , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks __SCREAMING_SNAKE_CASE = SequentialHook(PostForwardHook() , PostForwardHook() ) add_hook_to_module(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = test_model(__SCREAMING_SNAKE_CASE ) assert torch.allclose(__SCREAMING_SNAKE_CASE , output + 2 , atol=1E-5 ) def UpperCAmelCase__ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = ModelForTest() __SCREAMING_SNAKE_CASE = torch.randn(2 , 3 ) __SCREAMING_SNAKE_CASE = test_model(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = PostForwardHook() add_hook_to_module(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = test_model(__SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , output + 1 ) ) self.assertTrue(outputa.requires_grad ) __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = test_model(__SCREAMING_SNAKE_CASE ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def UpperCAmelCase__ ( self : List[Any] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) ) self.assertEqual(model.lineara.weight.device , torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device __SCREAMING_SNAKE_CASE = torch.randn(2 , 3 ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE ) self.assertEqual(output.device , torch.device(1 ) ) # We can add a general hook to put back output on same device as input. add_hook_to_module(__SCREAMING_SNAKE_CASE , AlignDevicesHook(io_same_device=__SCREAMING_SNAKE_CASE ) ) __SCREAMING_SNAKE_CASE = torch.randn(2 , 3 ).to(0 ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE ) self.assertEqual(output.device , torch.device(0 ) ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices __SCREAMING_SNAKE_CASE = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True} add_hook_to_module(model.lineara , AlignDevicesHook(**__SCREAMING_SNAKE_CASE ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**__SCREAMING_SNAKE_CASE ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**__SCREAMING_SNAKE_CASE ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device __SCREAMING_SNAKE_CASE = torch.device(hook_kwargs["""execution_device"""] ) self.assertEqual(model.batchnorm.running_mean.device , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.randn(2 , 3 ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE ) self.assertEqual(output.device , __SCREAMING_SNAKE_CASE ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload __SCREAMING_SNAKE_CASE = { """execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True, """offload_buffers""": True, } add_hook_to_module(model.lineara , AlignDevicesHook(**__SCREAMING_SNAKE_CASE ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**__SCREAMING_SNAKE_CASE ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**__SCREAMING_SNAKE_CASE ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) __SCREAMING_SNAKE_CASE = torch.randn(2 , 3 ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE ) self.assertEqual(output.device , __SCREAMING_SNAKE_CASE ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices __SCREAMING_SNAKE_CASE = 0 if torch.cuda.is_available() else """cpu""" attach_align_device_hook(__SCREAMING_SNAKE_CASE , execution_device=__SCREAMING_SNAKE_CASE , offload=__SCREAMING_SNAKE_CASE ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device __SCREAMING_SNAKE_CASE = torch.device(__SCREAMING_SNAKE_CASE ) self.assertEqual(model.batchnorm.running_mean.device , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.randn(2 , 3 ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE ) self.assertEqual(output.device , __SCREAMING_SNAKE_CASE ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(__SCREAMING_SNAKE_CASE ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload attach_align_device_hook(__SCREAMING_SNAKE_CASE , execution_device=__SCREAMING_SNAKE_CASE , offload=__SCREAMING_SNAKE_CASE , offload_buffers=__SCREAMING_SNAKE_CASE ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) __SCREAMING_SNAKE_CASE = torch.randn(2 , 3 ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE ) self.assertEqual(output.device , __SCREAMING_SNAKE_CASE ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(__SCREAMING_SNAKE_CASE ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) def UpperCAmelCase__ ( self : Any ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices __SCREAMING_SNAKE_CASE = 0 if torch.cuda.is_available() else """cpu""" attach_align_device_hook( __SCREAMING_SNAKE_CASE , execution_device=__SCREAMING_SNAKE_CASE , offload=__SCREAMING_SNAKE_CASE , weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device __SCREAMING_SNAKE_CASE = torch.device(__SCREAMING_SNAKE_CASE ) self.assertEqual(model.batchnorm.running_mean.device , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.randn(2 , 3 ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE ) self.assertEqual(output.device , __SCREAMING_SNAKE_CASE ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(__SCREAMING_SNAKE_CASE ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload attach_align_device_hook( __SCREAMING_SNAKE_CASE , execution_device=__SCREAMING_SNAKE_CASE , offload=__SCREAMING_SNAKE_CASE , weights_map=model.state_dict() , offload_buffers=__SCREAMING_SNAKE_CASE , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) __SCREAMING_SNAKE_CASE = torch.randn(2 , 3 ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE ) self.assertEqual(output.device , __SCREAMING_SNAKE_CASE ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(__SCREAMING_SNAKE_CASE ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
267
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCAmelCase : List[str] = logging.get_logger(__name__) class lowerCAmelCase__ ( a , a ): """simple docstring""" lowerCAmelCase__ = "maskformer-swin" lowerCAmelCase__ = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self : str , __SCREAMING_SNAKE_CASE : Tuple=224 , __SCREAMING_SNAKE_CASE : str=4 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : Optional[Any]=96 , __SCREAMING_SNAKE_CASE : Optional[Any]=[2, 2, 6, 2] , __SCREAMING_SNAKE_CASE : Any=[3, 6, 12, 24] , __SCREAMING_SNAKE_CASE : Dict=7 , __SCREAMING_SNAKE_CASE : Dict=4.0 , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : int=0.0 , __SCREAMING_SNAKE_CASE : str=0.1 , __SCREAMING_SNAKE_CASE : List[Any]="gelu" , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : Optional[int]=0.02 , __SCREAMING_SNAKE_CASE : Optional[int]=1E-5 , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Dict=None , **__SCREAMING_SNAKE_CASE : Tuple , ) -> Tuple: """simple docstring""" super().__init__(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = patch_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = embed_dim __SCREAMING_SNAKE_CASE = depths __SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = num_heads __SCREAMING_SNAKE_CASE = window_size __SCREAMING_SNAKE_CASE = mlp_ratio __SCREAMING_SNAKE_CASE = qkv_bias __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = drop_path_rate __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = use_absolute_embeddings __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __SCREAMING_SNAKE_CASE = int(embed_dim * 2 ** (len(__SCREAMING_SNAKE_CASE ) - 1) ) __SCREAMING_SNAKE_CASE = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )] __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices( out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
267
1
'''simple docstring''' def lowerCamelCase__ ( _A ): if upper_limit < 0: raise ValueError('Limit for the Catalan sequence must be ≥ 0' ) a : List[str] = [0] * (upper_limit + 1) # Base case: C(0) = C(1) = 1 a : Any = 1 if upper_limit > 0: a : List[str] = 1 # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i for i in range(2 , upper_limit + 1 ): for j in range(_lowerCAmelCase ): catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1] return catalan_list if __name__ == "__main__": print('\n********* Catalan Numbers Using Dynamic Programming ************\n') print('\n*** Enter -1 at any time to quit ***') print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='') try: while True: lowerCAmelCase: Any = int(input().strip()) if N < 0: print('\n********* Goodbye!! ************') break else: print(F"The Catalan numbers from 0 through {N} are:") print(catalan_numbers(N)) print('Try another upper limit for the sequence: ', end='') except (NameError, ValueError): print('\n********* Invalid input, goodbye! ************\n') import doctest doctest.testmod()
354
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase: List[Any] = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase: Optional[int] = ['NllbTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase: Dict = ['NllbTokenizerFast'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb import NllbTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb_fast import NllbTokenizerFast else: import sys lowerCAmelCase: Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
96
0
import re def UpperCamelCase_( lowerCamelCase_ ) -> bool: _lowercase : str = re.compile( R'^(?:0|94|\+94|0{2}94)' R'7(0|1|2|4|5|6|7|8)' R'(-| |)' R'\d{7}$' ) return bool(re.search(lowerCamelCase_ , lowerCamelCase_ ) ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Optional[Any] = "0094702343221" print(is_sri_lankan_phone_number(phone))
21
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_tf_available, is_torch_available, ) SCREAMING_SNAKE_CASE : List[str] = { "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"], "processing_speech_to_text": ["Speech2TextProcessor"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Union[str, Any] = ["Speech2TextTokenizer"] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : int = ["Speech2TextFeatureExtractor"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Optional[Any] = [ "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFSpeech2TextForConditionalGeneration", "TFSpeech2TextModel", "TFSpeech2TextPreTrainedModel", ] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : str = [ "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST", "Speech2TextForConditionalGeneration", "Speech2TextModel", "Speech2TextPreTrainedModel", ] if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig from .processing_speech_to_text import SpeechaTextProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speech_to_text import SpeechaTextTokenizer try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_speech_to_text import ( TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeechaTextForConditionalGeneration, TFSpeechaTextModel, TFSpeechaTextPreTrainedModel, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechaTextForConditionalGeneration, SpeechaTextModel, SpeechaTextPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
21
1
from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter a__ : Union[str, Any] = '''Create a default config file for Accelerate with only a few flags set.''' def UpperCAmelCase__ (lowerCAmelCase_="no" , lowerCAmelCase_ = default_json_config_file , lowerCAmelCase_ = False ): '''simple docstring''' __SCREAMING_SNAKE_CASE = Path(lowerCAmelCase_ ) path.parent.mkdir(parents=lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) if path.exists(): print( f"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" ) return False __SCREAMING_SNAKE_CASE = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( f"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" ) __SCREAMING_SNAKE_CASE = { "compute_environment": "LOCAL_MACHINE", "mixed_precision": mixed_precision, } if torch.cuda.is_available(): __SCREAMING_SNAKE_CASE = torch.cuda.device_count() __SCREAMING_SNAKE_CASE = num_gpus __SCREAMING_SNAKE_CASE = False if num_gpus > 1: __SCREAMING_SNAKE_CASE = "MULTI_GPU" else: __SCREAMING_SNAKE_CASE = "NO" elif is_xpu_available() and use_xpu: __SCREAMING_SNAKE_CASE = torch.xpu.device_count() __SCREAMING_SNAKE_CASE = num_xpus __SCREAMING_SNAKE_CASE = False if num_xpus > 1: __SCREAMING_SNAKE_CASE = "MULTI_XPU" else: __SCREAMING_SNAKE_CASE = "NO" elif is_npu_available(): __SCREAMING_SNAKE_CASE = torch.npu.device_count() __SCREAMING_SNAKE_CASE = num_npus __SCREAMING_SNAKE_CASE = False if num_npus > 1: __SCREAMING_SNAKE_CASE = "MULTI_NPU" else: __SCREAMING_SNAKE_CASE = "NO" else: __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = "NO" __SCREAMING_SNAKE_CASE = ClusterConfig(**lowerCAmelCase_ ) config.to_json_file(lowerCAmelCase_ ) return path def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = parser.add_parser("default" , parents=lowerCAmelCase_ , help=lowerCAmelCase_ , formatter_class=lowerCAmelCase_ ) parser.add_argument( "--config_file" , default=lowerCAmelCase_ , help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." ) , dest="save_location" , ) parser.add_argument( "--mixed_precision" , choices=["no", "fp16", "bf16"] , type=lowerCAmelCase_ , help="Whether or not to use mixed precision training. " "Choose between FP16 and BF16 (bfloat16) training. " "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later." , default="no" , ) parser.set_defaults(func=lowerCAmelCase_ ) return parser def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = write_basic_config(args.mixed_precision , args.save_location ) if config_file: print(f"""accelerate configuration saved at {config_file}""" )
366
"""simple docstring""" from __future__ import annotations def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' if not nums: return 0 __SCREAMING_SNAKE_CASE = nums[0] __SCREAMING_SNAKE_CASE = 0 for num in nums[1:]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = ( max_excluding + num, max(lowerCAmelCase_ , lowerCAmelCase_ ), ) return max(lowerCAmelCase_ , lowerCAmelCase_ ) if __name__ == "__main__": import doctest doctest.testmod()
195
0
'''simple docstring''' from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def _SCREAMING_SNAKE_CASE ( UpperCamelCase = "isbn/0140328726" ): """simple docstring""" lowerCAmelCase__ : List[str] = olid.strip().strip("""/""" ) # Remove leading/trailing whitespace & slashes if new_olid.count("""/""" ) != 1: lowerCAmelCase__ : Tuple = f"""{olid} is not a valid Open Library olid""" raise ValueError(A_ ) return requests.get(f"""https://openlibrary.org/{new_olid}.json""" ).json() def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : List[str] = { '''title''': '''Title''', '''publish_date''': '''Publish date''', '''authors''': '''Authors''', '''number_of_pages''': '''Number of pages:''', '''first_sentence''': '''First sentence''', '''isbn_10''': '''ISBN (10)''', '''isbn_13''': '''ISBN (13)''', } lowerCAmelCase__ : Union[str, Any] = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} lowerCAmelCase__ : Dict = [ get_openlibrary_data(author["""key"""] )['''name'''] for author in data['''Authors'''] ] lowerCAmelCase__ : List[str] = data['''First sentence''']['''value'''] for key, value in data.items(): if isinstance(A_ , A_ ): lowerCAmelCase__ : Any = ''', '''.join(A_ ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: _lowerCAmelCase = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""") continue print(F"""\nSearching Open Library for ISBN: {isbn}...\n""") try: _lowerCAmelCase = summarize_book(get_openlibrary_data(F"""isbn/{isbn}""")) print('''\n'''.join(F"""{key}: {value}""" for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(F"""Sorry, there are no results for ISBN: {isbn}.""")
37
"""simple docstring""" import unittest import numpy as np def snake_case_ ( A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray | None = None, ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = np.shape(A_ ) _lowerCamelCase : List[str] = np.shape(A_ ) _lowerCamelCase : List[str] = np.shape(A_ ) if shape_a[0] != shape_b[0]: _lowerCamelCase : Tuple = ( '''Expected the same number of rows for A and B. ''' F'''Instead found A of size {shape_a} and B of size {shape_b}''' ) raise ValueError(A_ ) if shape_b[1] != shape_c[1]: _lowerCamelCase : Tuple = ( '''Expected the same number of columns for B and C. ''' F'''Instead found B of size {shape_b} and C of size {shape_c}''' ) raise ValueError(A_ ) _lowerCamelCase : List[str] = pseudo_inv if a_inv is None: try: _lowerCamelCase : Any = np.linalg.inv(A_ ) except np.linalg.LinAlgError: raise ValueError( '''Input matrix A is not invertible. Cannot compute Schur complement.''' ) return mat_c - mat_b.T @ a_inv @ mat_b class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] ) _lowerCamelCase : List[str] = np.array([[2, 1], [6, 3]] ) _lowerCamelCase : List[Any] = schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : Dict = np.block([[a, b], [b.T, c]] ) _lowerCamelCase : Tuple = np.linalg.det(__lowerCAmelCase ) _lowerCamelCase : List[str] = np.linalg.det(__lowerCAmelCase ) _lowerCamelCase : Any = np.linalg.det(__lowerCAmelCase ) self.assertAlmostEqual(__lowerCAmelCase , det_a * det_s ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _lowerCamelCase : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] ) _lowerCamelCase : int = np.array([[2, 1], [6, 3]] ) with self.assertRaises(__lowerCAmelCase ): schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] ) _lowerCamelCase : Union[str, Any] = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(__lowerCAmelCase ): schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
72
0
'''simple docstring''' import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __snake_case = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class lowercase ( A__ , unittest.TestCase ): """simple docstring""" _a = XLMRobertaTokenizer _a = XLMRobertaTokenizerFast _a = True _a = True def lowerCAmelCase__ ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing UpperCamelCase__ :Dict = XLMRobertaTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :str = '''<pad>''' UpperCamelCase__ :Optional[int] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ ) def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(UpperCamelCase_ ) , 1002 ) def lowerCAmelCase__ ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1002 ) def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Any = XLMRobertaTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ ) UpperCamelCase__ :Dict = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(UpperCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) UpperCamelCase__ :Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) UpperCamelCase__ :Optional[int] = tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) self.assertListEqual( UpperCamelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) UpperCamelCase__ :Any = tokenizer.convert_ids_to_tokens(UpperCamelCase_ ) self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def lowerCAmelCase__ ( self ): '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return UpperCamelCase__ :int = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCamelCase__ :Union[str, Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ ) UpperCamelCase__ :str = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ ) UpperCamelCase__ :List[Any] = tempfile.mkdtemp() UpperCamelCase__ :List[Any] = tokenizer_r.save_pretrained(UpperCamelCase_ ) UpperCamelCase__ :List[Any] = tokenizer_p.save_pretrained(UpperCamelCase_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) UpperCamelCase__ :Dict = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_ ) # Checks everything loads correctly in the same way UpperCamelCase__ :str = tokenizer_r.from_pretrained(UpperCamelCase_ ) UpperCamelCase__ :int = tokenizer_p.from_pretrained(UpperCamelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(UpperCamelCase_ ) # Save tokenizer rust, legacy_format=True UpperCamelCase__ :Optional[int] = tempfile.mkdtemp() UpperCamelCase__ :Dict = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_ ) UpperCamelCase__ :List[str] = tokenizer_p.save_pretrained(UpperCamelCase_ ) # Checks it save with the same files self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_ ) # Checks everything loads correctly in the same way UpperCamelCase__ :List[Any] = tokenizer_r.from_pretrained(UpperCamelCase_ ) UpperCamelCase__ :Tuple = tokenizer_p.from_pretrained(UpperCamelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) ) shutil.rmtree(UpperCamelCase_ ) # Save tokenizer rust, legacy_format=False UpperCamelCase__ :Dict = tempfile.mkdtemp() UpperCamelCase__ :Dict = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_ ) UpperCamelCase__ :Optional[int] = tokenizer_p.save_pretrained(UpperCamelCase_ ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way UpperCamelCase__ :Optional[Any] = tokenizer_r.from_pretrained(UpperCamelCase_ ) UpperCamelCase__ :Dict = tokenizer_p.from_pretrained(UpperCamelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) ) shutil.rmtree(UpperCamelCase_ ) @cached_property def lowerCAmelCase__ ( self ): '''simple docstring''' return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' ) def lowerCAmelCase__ ( self ): '''simple docstring''' with tempfile.NamedTemporaryFile() as f: shutil.copyfile(UpperCamelCase_ , f.name ) UpperCamelCase__ :str = XLMRobertaTokenizer(f.name , keep_accents=UpperCamelCase_ ) UpperCamelCase__ :Optional[Any] = pickle.dumps(UpperCamelCase_ ) pickle.loads(UpperCamelCase_ ) def lowerCAmelCase__ ( self ): '''simple docstring''' if not self.test_rust_tokenizer: return UpperCamelCase__ :str = self.get_tokenizer() UpperCamelCase__ :str = self.get_rust_tokenizer() UpperCamelCase__ :int = '''I was born in 92000, and this is falsé.''' UpperCamelCase__ :Tuple = tokenizer.tokenize(UpperCamelCase_ ) UpperCamelCase__ :str = rust_tokenizer.tokenize(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) UpperCamelCase__ :Tuple = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) UpperCamelCase__ :Any = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) UpperCamelCase__ :Tuple = self.get_rust_tokenizer() UpperCamelCase__ :Union[str, Any] = tokenizer.encode(UpperCamelCase_ ) UpperCamelCase__ :Dict = rust_tokenizer.encode(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) @slow def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :int = '''Hello World!''' UpperCamelCase__ :Union[str, Any] = [0, 35378, 6661, 38, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(UpperCamelCase_ , self.big_tokenizer.encode(UpperCamelCase_ ) ) @slow def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Union[str, Any] = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) UpperCamelCase__ :int = [ 0, 3293, 83, 10, 4552, 4989, 7986, 678, 10, 5915, 111, 179459, 124850, 4, 6044, 237, 12, 6, 5, 6, 4, 6780, 705, 15, 1388, 44, 378, 10114, 711, 152, 20, 6, 5, 22376, 642, 1221, 15190, 34153, 450, 5608, 959, 1119, 57702, 136, 186, 47, 1098, 29367, 47, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 6044, 237, 6284, 50901, 528, 31, 90, 34, 927, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(UpperCamelCase_ , self.big_tokenizer.encode(UpperCamelCase_ ) ) @slow def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :str = {'''input_ids''': [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase_ , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
369
'''simple docstring''' import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class lowercase ( A__ ): """simple docstring""" _a = (DDIMParallelScheduler,) _a = (('eta', 0.0), ('num_inference_steps', 50)) def lowerCAmelCase__ ( self , **UpperCamelCase_ ): '''simple docstring''' UpperCamelCase__ :List[Any] = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''clip_sample''': True, } config.update(**UpperCamelCase_ ) return config def lowerCAmelCase__ ( self , **UpperCamelCase_ ): '''simple docstring''' UpperCamelCase__ :str = self.scheduler_classes[0] UpperCamelCase__ :Optional[Any] = self.get_scheduler_config(**UpperCamelCase_ ) UpperCamelCase__ :Optional[Any] = scheduler_class(**UpperCamelCase_ ) UpperCamelCase__ , UpperCamelCase__ :Optional[int] = 10, 0.0 UpperCamelCase__ :List[str] = self.dummy_model() UpperCamelCase__ :List[str] = self.dummy_sample_deter scheduler.set_timesteps(UpperCamelCase_ ) for t in scheduler.timesteps: UpperCamelCase__ :Optional[int] = model(UpperCamelCase_ , UpperCamelCase_ ) UpperCamelCase__ :List[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample return sample def lowerCAmelCase__ ( self ): '''simple docstring''' for timesteps in [100, 500, 1000]: self.check_over_configs(num_train_timesteps=UpperCamelCase_ ) def lowerCAmelCase__ ( self ): '''simple docstring''' for steps_offset in [0, 1]: self.check_over_configs(steps_offset=UpperCamelCase_ ) UpperCamelCase__ :List[str] = self.scheduler_classes[0] UpperCamelCase__ :List[str] = self.get_scheduler_config(steps_offset=1 ) UpperCamelCase__ :str = scheduler_class(**UpperCamelCase_ ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) ) def lowerCAmelCase__ ( self ): '''simple docstring''' for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=UpperCamelCase_ , beta_end=UpperCamelCase_ ) def lowerCAmelCase__ ( self ): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=UpperCamelCase_ ) def lowerCAmelCase__ ( self ): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=UpperCamelCase_ ) def lowerCAmelCase__ ( self ): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=UpperCamelCase_ ) def lowerCAmelCase__ ( self ): '''simple docstring''' for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=UpperCamelCase_ ) def lowerCAmelCase__ ( self ): '''simple docstring''' for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=UpperCamelCase_ ) def lowerCAmelCase__ ( self ): '''simple docstring''' self.check_over_configs(thresholding=UpperCamelCase_ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=UpperCamelCase_ , prediction_type=UpperCamelCase_ , sample_max_value=UpperCamelCase_ , ) def lowerCAmelCase__ ( self ): '''simple docstring''' for t in [1, 10, 49]: self.check_over_forward(time_step=UpperCamelCase_ ) def lowerCAmelCase__ ( self ): '''simple docstring''' for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ): self.check_over_forward(time_step=UpperCamelCase_ , num_inference_steps=UpperCamelCase_ ) def lowerCAmelCase__ ( self ): '''simple docstring''' for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ): self.check_over_forward(time_step=UpperCamelCase_ , eta=UpperCamelCase_ ) def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Union[str, Any] = self.scheduler_classes[0] UpperCamelCase__ :List[str] = self.get_scheduler_config() UpperCamelCase__ :int = scheduler_class(**UpperCamelCase_ ) assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14771 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32460 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00979 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1e-5 def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Tuple = self.scheduler_classes[0] UpperCamelCase__ :Dict = self.get_scheduler_config() UpperCamelCase__ :Any = scheduler_class(**UpperCamelCase_ ) UpperCamelCase__ , UpperCamelCase__ :List[str] = 10, 0.0 scheduler.set_timesteps(UpperCamelCase_ ) UpperCamelCase__ :Optional[int] = self.dummy_model() UpperCamelCase__ :List[Any] = self.dummy_sample_deter UpperCamelCase__ :int = self.dummy_sample_deter + 0.1 UpperCamelCase__ :List[str] = self.dummy_sample_deter - 0.1 UpperCamelCase__ :str = samplea.shape[0] UpperCamelCase__ :Optional[Any] = torch.stack([samplea, samplea, samplea] , dim=0 ) UpperCamelCase__ :List[Any] = torch.arange(UpperCamelCase_ )[0:3, None].repeat(1 , UpperCamelCase_ ) UpperCamelCase__ :Dict = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) UpperCamelCase__ :Any = scheduler.batch_step_no_noise(UpperCamelCase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , UpperCamelCase_ ) UpperCamelCase__ :Dict = torch.sum(torch.abs(UpperCamelCase_ ) ) UpperCamelCase__ :Any = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 1147.7904 ) < 1e-2 assert abs(result_mean.item() - 0.4982 ) < 1e-3 def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :int = self.full_loop() UpperCamelCase__ :List[str] = torch.sum(torch.abs(UpperCamelCase_ ) ) UpperCamelCase__ :Any = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 172.0067 ) < 1e-2 assert abs(result_mean.item() - 0.223967 ) < 1e-3 def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Any = self.full_loop(prediction_type='''v_prediction''' ) UpperCamelCase__ :Dict = torch.sum(torch.abs(UpperCamelCase_ ) ) UpperCamelCase__ :str = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 52.5302 ) < 1e-2 assert abs(result_mean.item() - 0.0684 ) < 1e-3 def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Dict = self.full_loop(set_alpha_to_one=UpperCamelCase_ , beta_start=0.01 ) UpperCamelCase__ :str = torch.sum(torch.abs(UpperCamelCase_ ) ) UpperCamelCase__ :int = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 149.8295 ) < 1e-2 assert abs(result_mean.item() - 0.1951 ) < 1e-3 def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :List[str] = self.full_loop(set_alpha_to_one=UpperCamelCase_ , beta_start=0.01 ) UpperCamelCase__ :Dict = torch.sum(torch.abs(UpperCamelCase_ ) ) UpperCamelCase__ :List[str] = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 149.0784 ) < 1e-2 assert abs(result_mean.item() - 0.1941 ) < 1e-3
219
0
'''simple docstring''' import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": _SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( "--original_config_file", default=None, type=str, help="The YAML config file corresponding to the original architecture.", ) parser.add_argument( "--num_in_channels", default=None, type=int, help="The number of input channels. If `None` number of input channels will be automatically inferred.", ) parser.add_argument( "--scheduler_type", default="pndm", type=str, help="Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']", ) parser.add_argument( "--pipeline_type", default=None, type=str, help=( "The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\'" ". If `None` pipeline will be automatically inferred." ), ) parser.add_argument( "--image_size", default=None, type=int, help=( "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2" " Base. Use 768 for Stable Diffusion v2." ), ) parser.add_argument( "--prediction_type", default=None, type=str, help=( "The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable" " Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2." ), ) parser.add_argument( "--extract_ema", action="store_true", help=( "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights" " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield" " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning." ), ) parser.add_argument( "--upcast_attention", action="store_true", help=( "Whether the attention computation should always be upcasted. This is necessary when running stable" " diffusion 2.1." ), ) parser.add_argument( "--from_safetensors", action="store_true", help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.", ) parser.add_argument( "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)") parser.add_argument( "--stable_unclip", type=str, default=None, required=False, help="Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.", ) parser.add_argument( "--stable_unclip_prior", type=str, default=None, required=False, help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.", ) parser.add_argument( "--clip_stats_path", type=str, help="Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.", required=False, ) parser.add_argument( "--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint." ) parser.add_argument("--half", action="store_true", help="Save weights in half precision.") parser.add_argument( "--vae_path", type=str, default=None, required=False, help="Set to a path, hub id to an already converted vae to not convert it again.", ) _SCREAMING_SNAKE_CASE : Dict = parser.parse_args() _SCREAMING_SNAKE_CASE : List[str] = download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
85
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowerCamelCase : str = { 'configuration_rag': ['RagConfig'], 'retrieval_rag': ['RagRetriever'], 'tokenization_rag': ['RagTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Dict = [ 'RagModel', 'RagPreTrainedModel', 'RagSequenceForGeneration', 'RagTokenForGeneration', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Optional[int] = [ 'TFRagModel', 'TFRagPreTrainedModel', 'TFRagSequenceForGeneration', 'TFRagTokenForGeneration', ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys lowerCamelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
2
0
"""simple docstring""" import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class a ( lowerCAmelCase_ ): def __init__( self : List[str] , *__lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : int=None , **__lowerCAmelCase : Any ): super().__init__(*__lowerCAmelCase , **__lowerCAmelCase ) _UpperCAmelCase = eval_examples _UpperCAmelCase = post_process_function def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Any=None , __lowerCAmelCase : str = "eval" ): _UpperCAmelCase = self.eval_dataset if eval_dataset is None else eval_dataset _UpperCAmelCase = self.get_eval_dataloader(__lowerCAmelCase ) _UpperCAmelCase = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. _UpperCAmelCase = self.compute_metrics _UpperCAmelCase = None _UpperCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop _UpperCAmelCase = time.time() try: _UpperCAmelCase = eval_loop( __lowerCAmelCase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , metric_key_prefix=__lowerCAmelCase , ) finally: _UpperCAmelCase = compute_metrics _UpperCAmelCase = self.args.eval_batch_size * self.args.world_size if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( __lowerCAmelCase , __lowerCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default _UpperCAmelCase = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions ) _UpperCAmelCase = self.compute_metrics(__lowerCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'''{metric_key_prefix}_''' ): _UpperCAmelCase = metrics.pop(__lowerCAmelCase ) metrics.update(output.metrics ) else: _UpperCAmelCase = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(__lowerCAmelCase ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) _UpperCAmelCase = self.callback_handler.on_evaluate(self.args , self.state , self.control , __lowerCAmelCase ) return metrics def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : str = "test" ): _UpperCAmelCase = self.get_test_dataloader(__lowerCAmelCase ) # Temporarily disable metric computation, we will do it in the loop here. _UpperCAmelCase = self.compute_metrics _UpperCAmelCase = None _UpperCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop _UpperCAmelCase = time.time() try: _UpperCAmelCase = eval_loop( __lowerCAmelCase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCAmelCase , metric_key_prefix=__lowerCAmelCase , ) finally: _UpperCAmelCase = compute_metrics _UpperCAmelCase = self.args.eval_batch_size * self.args.world_size if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( __lowerCAmelCase , __lowerCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output _UpperCAmelCase = self.post_process_function(__lowerCAmelCase , __lowerCAmelCase , output.predictions , """predict""" ) _UpperCAmelCase = self.compute_metrics(__lowerCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'''{metric_key_prefix}_''' ): _UpperCAmelCase = metrics.pop(__lowerCAmelCase ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__lowerCAmelCase )
30
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""", } class a ( lowerCAmelCase_ ): _snake_case : Any = 'layoutlmv3' def __init__( self : Optional[Any] , __lowerCAmelCase : Tuple=5_0265 , __lowerCAmelCase : Union[str, Any]=768 , __lowerCAmelCase : str=12 , __lowerCAmelCase : int=12 , __lowerCAmelCase : Any=3072 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Any=512 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Optional[int]=1e-5 , __lowerCAmelCase : int=1 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : List[str]=1024 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=128 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=64 , __lowerCAmelCase : List[str]=256 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : int=16 , __lowerCAmelCase : Optional[Any]=None , **__lowerCAmelCase : Union[str, Any] , ): super().__init__( vocab_size=__lowerCAmelCase , hidden_size=__lowerCAmelCase , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , intermediate_size=__lowerCAmelCase , hidden_act=__lowerCAmelCase , hidden_dropout_prob=__lowerCAmelCase , attention_probs_dropout_prob=__lowerCAmelCase , max_position_embeddings=__lowerCAmelCase , type_vocab_size=__lowerCAmelCase , initializer_range=__lowerCAmelCase , layer_norm_eps=__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , ) _UpperCAmelCase = max_ad_position_embeddings _UpperCAmelCase = coordinate_size _UpperCAmelCase = shape_size _UpperCAmelCase = has_relative_attention_bias _UpperCAmelCase = rel_pos_bins _UpperCAmelCase = max_rel_pos _UpperCAmelCase = has_spatial_attention_bias _UpperCAmelCase = rel_ad_pos_bins _UpperCAmelCase = max_rel_ad_pos _UpperCAmelCase = text_embed _UpperCAmelCase = visual_embed _UpperCAmelCase = input_size _UpperCAmelCase = num_channels _UpperCAmelCase = patch_size _UpperCAmelCase = classifier_dropout class a ( lowerCAmelCase_ ): _snake_case : str = version.parse('1.12' ) @property def lowerCAmelCase_ ( self : Dict ): # The order of inputs is different for question answering and sequence classification if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) else: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels"""}), ] ) @property def lowerCAmelCase_ ( self : List[Any] ): return 1e-5 @property def lowerCAmelCase_ ( self : List[str] ): return 12 def lowerCAmelCase_ ( self : str , __lowerCAmelCase : "ProcessorMixin" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 40 , __lowerCAmelCase : int = 40 , ): setattr(processor.image_processor , """apply_ocr""" , __lowerCAmelCase ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _UpperCAmelCase = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _UpperCAmelCase = processor.tokenizer.num_special_tokens_to_add(__lowerCAmelCase ) _UpperCAmelCase = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase ) # Generate dummy inputs according to compute batch and sequence _UpperCAmelCase = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes _UpperCAmelCase = [[[48, 84, 73, 128]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) _UpperCAmelCase = self._generate_dummy_images(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = dict( processor( __lowerCAmelCase , text=__lowerCAmelCase , boxes=__lowerCAmelCase , return_tensors=__lowerCAmelCase , ) ) return inputs
30
1
'''simple docstring''' import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class snake_case ( lowercase , lowercase , lowercase , unittest.TestCase ): """simple docstring""" _lowerCamelCase = StableUnCLIPPipeline _lowerCamelCase = TEXT_TO_IMAGE_PARAMS _lowerCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS _lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS _lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false _lowerCamelCase = False def snake_case ( self ): """simple docstring""" lowerCamelCase_ = 32 lowerCamelCase_ = embedder_hidden_size # prior components torch.manual_seed(0 ) lowerCamelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) lowerCamelCase_ = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCamelCase , projection_dim=UpperCamelCase , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) lowerCamelCase_ = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=UpperCamelCase , num_layers=1 , ) torch.manual_seed(0 ) lowerCamelCase_ = DDPMScheduler( variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1000 , clip_sample=UpperCamelCase , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , ) # regular denoising components torch.manual_seed(0 ) lowerCamelCase_ = StableUnCLIPImageNormalizer(embedding_dim=UpperCamelCase ) lowerCamelCase_ = DDPMScheduler(beta_schedule="squaredcos_cap_v2" ) torch.manual_seed(0 ) lowerCamelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) lowerCamelCase_ = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) lowerCamelCase_ = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCamelCase , layers_per_block=1 , upcast_attention=UpperCamelCase , use_linear_projection=UpperCamelCase , ) torch.manual_seed(0 ) lowerCamelCase_ = DDIMScheduler( beta_schedule="scaled_linear" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=UpperCamelCase , steps_offset=1 , ) torch.manual_seed(0 ) lowerCamelCase_ = AutoencoderKL() lowerCamelCase_ = { # prior components "prior_tokenizer": prior_tokenizer, "prior_text_encoder": prior_text_encoder, "prior": prior, "prior_scheduler": prior_scheduler, # image noising components "image_normalizer": image_normalizer, "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, "text_encoder": text_encoder, "unet": unet, "scheduler": scheduler, "vae": vae, } return components def snake_case ( self , UpperCamelCase , UpperCamelCase=0 ): """simple docstring""" if str(UpperCamelCase ).startswith("mps" ): lowerCamelCase_ = torch.manual_seed(UpperCamelCase ) else: lowerCamelCase_ = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase ) lowerCamelCase_ = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "prior_num_inference_steps": 2, "output_type": "numpy", } return inputs def snake_case ( self ): """simple docstring""" lowerCamelCase_ = torch_device == "cpu" self._test_attention_slicing_forward_pass(test_max_difference=UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = torch_device in ["cpu", "mps"] self._test_inference_batch_single_identical(test_max_difference=UpperCamelCase ) @slow @require_torch_gpu class snake_case ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): """simple docstring""" # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case ( self ): """simple docstring""" lowerCamelCase_ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" ) lowerCamelCase_ = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa ) pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowerCamelCase_ = torch.Generator(device="cpu" ).manual_seed(0 ) lowerCamelCase_ = pipe("anime turle" , generator=UpperCamelCase , output_type="np" ) lowerCamelCase_ = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase ) def snake_case ( self ): """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowerCamelCase_ = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa ) lowerCamelCase_ = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowerCamelCase_ = pipe( "anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , ) lowerCamelCase_ = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
55
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import ScoreSdeVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class snake_case ( lowercase ): """simple docstring""" _lowerCamelCase = 42 _lowerCamelCase = 42 def __init__( self , UpperCamelCase , UpperCamelCase ): """simple docstring""" super().__init__() self.register_modules(unet=UpperCamelCase , scheduler=UpperCamelCase ) @torch.no_grad() def __call__( self , UpperCamelCase = 1 , UpperCamelCase = 2000 , UpperCamelCase = None , UpperCamelCase = "pil" , UpperCamelCase = True , **UpperCamelCase , ): """simple docstring""" lowerCamelCase_ = self.unet.config.sample_size lowerCamelCase_ = (batch_size, 3, img_size, img_size) lowerCamelCase_ = self.unet lowerCamelCase_ = randn_tensor(UpperCamelCase , generator=UpperCamelCase ) * self.scheduler.init_noise_sigma lowerCamelCase_ = sample.to(self.device ) self.scheduler.set_timesteps(UpperCamelCase ) self.scheduler.set_sigmas(UpperCamelCase ) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): lowerCamelCase_ = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device ) # correction step for _ in range(self.scheduler.config.correct_steps ): lowerCamelCase_ = self.unet(UpperCamelCase , UpperCamelCase ).sample lowerCamelCase_ = self.scheduler.step_correct(UpperCamelCase , UpperCamelCase , generator=UpperCamelCase ).prev_sample # prediction step lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase ).sample lowerCamelCase_ = self.scheduler.step_pred(UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase ) lowerCamelCase_ ,lowerCamelCase_ = output.prev_sample, output.prev_sample_mean lowerCamelCase_ = sample_mean.clamp(0 , 1 ) lowerCamelCase_ = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCamelCase_ = self.numpy_to_pil(UpperCamelCase ) if not return_dict: return (sample,) return ImagePipelineOutput(images=UpperCamelCase )
55
1
'''simple docstring''' import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) UpperCamelCase__ = '''hf-internal-testing/tiny-random-bert''' UpperCamelCase__ = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''') UpperCamelCase__ = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6''' class lowerCamelCase_ ( unittest.TestCase ): def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = cached_file(_A , _A ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(_A ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(_A , _A ) ) ) with open(os.path.join(_A , '''refs''' , '''main''' ) ) as f: UpperCAmelCase__ : int = f.read() self.assertEqual(_A , os.path.join(_A , '''snapshots''' , _A , _A ) ) self.assertTrue(os.path.isfile(_A ) ) # File is cached at the same place the second time. UpperCAmelCase__ : str = cached_file(_A , _A ) self.assertEqual(_A , _A ) # Using a specific revision to test the full commit hash. UpperCAmelCase__ : Dict = cached_file(_A , _A , revision='''9b8c223''' ) self.assertEqual(_A , os.path.join(_A , '''snapshots''' , _A , _A ) ) def lowercase_ ( self : int ): '''simple docstring''' with self.assertRaisesRegex(_A , '''is not a valid model identifier''' ): UpperCAmelCase__ : int = cached_file('''tiny-random-bert''' , _A ) with self.assertRaisesRegex(_A , '''is not a valid git identifier''' ): UpperCAmelCase__ : str = cached_file(_A , _A , revision='''aaaa''' ) with self.assertRaisesRegex(_A , '''does not appear to have a file named''' ): UpperCAmelCase__ : str = cached_file(_A , '''conf''' ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' with self.assertRaisesRegex(_A , '''does not appear to have a file named''' ): UpperCAmelCase__ : Union[str, Any] = cached_file(_A , '''conf''' ) with open(os.path.join(_A , '''refs''' , '''main''' ) ) as f: UpperCAmelCase__ : Any = f.read() self.assertTrue(os.path.isfile(os.path.join(_A , '''.no_exist''' , _A , '''conf''' ) ) ) UpperCAmelCase__ : List[Any] = cached_file(_A , '''conf''' , _raise_exceptions_for_missing_entries=_A ) self.assertIsNone(_A ) UpperCAmelCase__ : Optional[int] = cached_file(_A , '''conf''' , local_files_only=_A , _raise_exceptions_for_missing_entries=_A ) self.assertIsNone(_A ) UpperCAmelCase__ : int = mock.Mock() UpperCAmelCase__ : Optional[Any] = 500 UpperCAmelCase__ : Any = {} UpperCAmelCase__ : Optional[int] = HTTPError UpperCAmelCase__ : Optional[Any] = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('''requests.Session.request''' , return_value=_A ) as mock_head: UpperCAmelCase__ : List[str] = cached_file(_A , '''conf''' , _raise_exceptions_for_connection_errors=_A ) self.assertIsNone(_A ) # This check we did call the fake head request mock_head.assert_called() def lowercase_ ( self : Dict ): '''simple docstring''' self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _A ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _A ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _A ) ) def lowercase_ ( self : str ): '''simple docstring''' self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) ) # The function raises if the repository does not exist. with self.assertRaisesRegex(_A , '''is not a valid model identifier''' ): get_file_from_repo('''bert-base-case''' , _A ) # The function raises if the revision does not exist. with self.assertRaisesRegex(_A , '''is not a valid git identifier''' ): get_file_from_repo('''bert-base-cased''' , _A , revision='''ahaha''' ) UpperCAmelCase__ : Any = get_file_from_repo('''bert-base-cased''' , _A ) # The name is the cached name which is not very easy to test, so instead we load the content. UpperCAmelCase__ : Any = json.loads(open(_A , '''r''' ).read() ) self.assertEqual(config['''hidden_size'''] , 768 ) def lowercase_ ( self : List[Any] ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase__ : Union[str, Any] = Path(_A ) / '''a.txt''' filename.touch() self.assertEqual(get_file_from_repo(_A , '''a.txt''' ) , str(_A ) ) self.assertIsNone(get_file_from_repo(_A , '''b.txt''' ) )
299
'''simple docstring''' import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class lowerCamelCase_ ( __a , unittest.TestCase ): lowerCAmelCase__ = XLMRobertaTokenizer lowerCAmelCase__ = XLMRobertaTokenizerFast lowerCAmelCase__ = True lowerCAmelCase__ = True def lowercase_ ( self : Dict ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase__ : Union[str, Any] = XLMRobertaTokenizer(_A , keep_accents=_A ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = '''<pad>''' UpperCAmelCase__ : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(_A ) , 1_002 ) def lowercase_ ( self : int ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_002 ) def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : str = XLMRobertaTokenizer(_A , keep_accents=_A ) UpperCAmelCase__ : int = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) UpperCAmelCase__ : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _A , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) UpperCAmelCase__ : Dict = tokenizer.convert_tokens_to_ids(_A ) self.assertListEqual( _A , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) UpperCAmelCase__ : Optional[int] = tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual( _A , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def lowercase_ ( self : str ): '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return UpperCAmelCase__ : List[str] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A ) UpperCAmelCase__ : Optional[int] = self.tokenizer_class.from_pretrained(_A , **_A ) UpperCAmelCase__ : List[str] = tempfile.mkdtemp() UpperCAmelCase__ : Any = tokenizer_r.save_pretrained(_A ) UpperCAmelCase__ : Tuple = tokenizer_p.save_pretrained(_A ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) UpperCAmelCase__ : Optional[int] = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(_A , _A ) # Checks everything loads correctly in the same way UpperCAmelCase__ : Any = tokenizer_r.from_pretrained(_A ) UpperCAmelCase__ : Dict = tokenizer_p.from_pretrained(_A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_A , _A ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(_A ) # Save tokenizer rust, legacy_format=True UpperCAmelCase__ : Union[str, Any] = tempfile.mkdtemp() UpperCAmelCase__ : Union[str, Any] = tokenizer_r.save_pretrained(_A , legacy_format=_A ) UpperCAmelCase__ : List[str] = tokenizer_p.save_pretrained(_A ) # Checks it save with the same files self.assertSequenceEqual(_A , _A ) # Checks everything loads correctly in the same way UpperCAmelCase__ : List[str] = tokenizer_r.from_pretrained(_A ) UpperCAmelCase__ : List[str] = tokenizer_p.from_pretrained(_A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_A , _A ) ) shutil.rmtree(_A ) # Save tokenizer rust, legacy_format=False UpperCAmelCase__ : Union[str, Any] = tempfile.mkdtemp() UpperCAmelCase__ : Dict = tokenizer_r.save_pretrained(_A , legacy_format=_A ) UpperCAmelCase__ : str = tokenizer_p.save_pretrained(_A ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way UpperCAmelCase__ : Union[str, Any] = tokenizer_r.from_pretrained(_A ) UpperCAmelCase__ : Optional[Any] = tokenizer_p.from_pretrained(_A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_A , _A ) ) shutil.rmtree(_A ) @cached_property def lowercase_ ( self : Optional[Any] ): '''simple docstring''' return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' ) def lowercase_ ( self : Any ): '''simple docstring''' with tempfile.NamedTemporaryFile() as f: shutil.copyfile(_A , f.name ) UpperCAmelCase__ : int = XLMRobertaTokenizer(f.name , keep_accents=_A ) UpperCAmelCase__ : str = pickle.dumps(_A ) pickle.loads(_A ) def lowercase_ ( self : int ): '''simple docstring''' if not self.test_rust_tokenizer: return UpperCAmelCase__ : Optional[Any] = self.get_tokenizer() UpperCAmelCase__ : Union[str, Any] = self.get_rust_tokenizer() UpperCAmelCase__ : Dict = '''I was born in 92000, and this is falsé.''' UpperCAmelCase__ : Dict = tokenizer.tokenize(_A ) UpperCAmelCase__ : List[Any] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) UpperCAmelCase__ : int = tokenizer.encode(_A , add_special_tokens=_A ) UpperCAmelCase__ : Optional[Any] = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) UpperCAmelCase__ : Any = self.get_rust_tokenizer() UpperCAmelCase__ : List[Any] = tokenizer.encode(_A ) UpperCAmelCase__ : Union[str, Any] = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) @slow def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : str = '''Hello World!''' UpperCAmelCase__ : Tuple = [0, 35_378, 6_661, 38, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(_A , self.big_tokenizer.encode(_A ) ) @slow def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : List[str] = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) UpperCAmelCase__ : Any = [ 0, 3_293, 83, 10, 4_552, 4_989, 7_986, 678, 10, 5_915, 111, 179_459, 124_850, 4, 6_044, 237, 12, 6, 5, 6, 4, 6_780, 705, 15, 1_388, 44, 378, 10_114, 711, 152, 20, 6, 5, 22_376, 642, 1_221, 15_190, 34_153, 450, 5_608, 959, 1_119, 57_702, 136, 186, 47, 1_098, 29_367, 47, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 6_044, 237, 6_284, 50_901, 528, 31, 90, 34, 927, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(_A , self.big_tokenizer.encode(_A ) ) @slow def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : int = {'''input_ids''': [[0, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [0, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_A , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
299
1
"""simple docstring""" import math class a : """simple docstring""" def UpperCamelCase ( self: int , UpperCamelCase: list[list[float]] , UpperCamelCase: list[int] ): """simple docstring""" A__ = 0.0 A__ = 0.0 for i in range(len(lowercase_ ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def UpperCamelCase ( self: List[str] , UpperCamelCase: list[list[int | float]] , UpperCamelCase: list[int] , UpperCamelCase: int , UpperCamelCase: float ): """simple docstring""" for i in range(len(lowercase_ ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def _snake_case ( ): # Training Examples ( m, n ) A__ = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) A__ = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training A__ = SelfOrganizingMap() A__ = 3 A__ = 0.5 for _ in range(UpperCAmelCase__ ): for j in range(len(UpperCAmelCase__ ) ): # training sample A__ = training_samples[j] # Compute the winning vector A__ = self_organizing_map.get_winner(UpperCAmelCase__ , UpperCAmelCase__ ) # Update the winning vector A__ = self_organizing_map.update(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # classify test sample A__ = [0, 0, 0, 1] A__ = self_organizing_map.get_winner(UpperCAmelCase__ , UpperCAmelCase__ ) # results print(F"""Clusters that the test sample belongs to : {winner}""" ) print(F"""Weights that have been trained : {weights}""" ) # running the main() function if __name__ == "__main__": main()
335
'''simple docstring''' from urllib.parse import quote import pytest from datasets.utils.hub import hf_hub_url @pytest.mark.parametrize("""repo_id""" , ["""canonical_dataset_name""", """org-name/dataset-name"""] ) @pytest.mark.parametrize("""path""" , ["""filename.csv""", """filename with blanks.csv"""] ) @pytest.mark.parametrize("""revision""" , [None, """v2"""] ) def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] ) -> Optional[int]: lowercase_ : Union[str, Any] = hf_hub_url(repo_id=UpperCAmelCase__ , path=UpperCAmelCase__ , revision=UpperCAmelCase__ ) assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(UpperCAmelCase__ )}'''
239
0
import inspect import math import tempfile import unittest import numpy as np from transformers import ViTMAEConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class a : """simple docstring""" def __init__( self : List[str] , lowerCamelCase : List[str] , lowerCamelCase : int=13 , lowerCamelCase : int=30 , lowerCamelCase : Union[str, Any]=2 , lowerCamelCase : Tuple=3 , lowerCamelCase : Optional[int]=True , lowerCamelCase : Dict=True , lowerCamelCase : Optional[int]=32 , lowerCamelCase : List[Any]=5 , lowerCamelCase : Tuple=4 , lowerCamelCase : Union[str, Any]=37 , lowerCamelCase : int="gelu" , lowerCamelCase : Dict=0.1 , lowerCamelCase : str=0.1 , lowerCamelCase : List[Any]=10 , lowerCamelCase : Optional[int]=0.02 , lowerCamelCase : List[str]=3 , lowerCamelCase : Dict=0.6 , lowerCamelCase : Dict=None , ) -> Any: __snake_case : str = parent __snake_case : Tuple = batch_size __snake_case : Optional[int] = image_size __snake_case : Dict = patch_size __snake_case : List[Any] = num_channels __snake_case : Tuple = is_training __snake_case : Dict = use_labels __snake_case : List[Any] = hidden_size __snake_case : List[str] = num_hidden_layers __snake_case : List[Any] = num_attention_heads __snake_case : Union[str, Any] = intermediate_size __snake_case : Any = hidden_act __snake_case : Tuple = hidden_dropout_prob __snake_case : Optional[int] = attention_probs_dropout_prob __snake_case : Union[str, Any] = type_sequence_label_size __snake_case : List[str] = initializer_range __snake_case : Optional[int] = mask_ratio __snake_case : Optional[int] = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) __snake_case : List[str] = (image_size // patch_size) ** 2 __snake_case : List[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def __snake_case ( self : Optional[int] ) -> Union[str, Any]: __snake_case : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case : List[Any] = None if self.use_labels: __snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : Optional[Any] = self.get_config() return config, pixel_values, labels def __snake_case ( self : int ) -> Union[str, Any]: return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def __snake_case ( self : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : Dict , lowerCamelCase : Union[str, Any] ) -> List[str]: __snake_case : List[Any] = ViTMAEModel(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __snake_case : Optional[Any] = model(lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __snake_case ( self : Optional[Any] , lowerCamelCase : Tuple , lowerCamelCase : List[str] , lowerCamelCase : int ) -> Optional[Any]: __snake_case : Dict = ViTMAEForPreTraining(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __snake_case : List[Any] = model(lowerCamelCase ) __snake_case : List[Any] = (self.image_size // self.patch_size) ** 2 __snake_case : str = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images __snake_case : List[Any] = 1 __snake_case : int = ViTMAEForPreTraining(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __snake_case : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __snake_case : Optional[int] = model(lowerCamelCase ) __snake_case : Optional[Any] = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def __snake_case ( self : Dict ) -> Tuple: __snake_case : Optional[int] = self.prepare_config_and_inputs() __snake_case : Optional[int] = config_and_inputs __snake_case : Union[str, Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class a (_lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () __UpperCAmelCase : List[Any] = {"feature-extraction": ViTMAEModel} if is_torch_available() else {} __UpperCAmelCase : Any = False __UpperCAmelCase : List[str] = False __UpperCAmelCase : int = False __UpperCAmelCase : Any = False def __snake_case ( self : List[str] ) -> Dict: __snake_case : Union[str, Any] = ViTMAEModelTester(self ) __snake_case : List[Any] = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 ) def __snake_case ( self : List[Any] ) -> int: self.config_tester.run_common_tests() @unittest.skip(reason="ViTMAE does not use inputs_embeds" ) def __snake_case ( self : str ) -> Tuple: pass def __snake_case ( self : Any ) -> List[str]: __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Optional[int] = model_class(lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __snake_case : int = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase , nn.Linear ) ) def __snake_case ( self : List[str] ) -> Optional[int]: __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Dict = model_class(lowerCamelCase ) __snake_case : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : List[Any] = [*signature.parameters.keys()] __snake_case : Optional[Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase ) def __snake_case ( self : int ) -> Optional[Any]: __snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase ) def __snake_case ( self : Any ) -> Any: __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowerCamelCase ) def __snake_case ( self : Dict , lowerCamelCase : List[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Any ) -> str: # make masks reproducible np.random.seed(2 ) __snake_case : List[Any] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 ) __snake_case : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) __snake_case : Any = torch.from_numpy(lowerCamelCase ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument __snake_case : Optional[int] = pt_noise super().check_pt_tf_models(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def __snake_case ( self : List[Any] ) -> Optional[Any]: __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Union[str, Any] = model_class(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): __snake_case : str = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) ) __snake_case : List[Any] = outputs[0].cpu().numpy() __snake_case : Union[str, Any] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCamelCase ) __snake_case : List[Any] = model_class.from_pretrained(lowerCamelCase ) model.to(lowerCamelCase ) # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): __snake_case : str = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) ) # Make sure we don't have nans __snake_case : Any = after_outputs[0].cpu().numpy() __snake_case : Tuple = 0 __snake_case : List[str] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCamelCase , 1E-5 ) @unittest.skip( reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." ) def __snake_case ( self : Optional[Any] ) -> Optional[int]: pass @unittest.skip( reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." ) def __snake_case ( self : Dict ) -> List[Any]: pass @unittest.skip( reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." ) def __snake_case ( self : int ) -> List[Any]: pass @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" ) def __snake_case ( self : Dict ) -> Dict: pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def __snake_case ( self : List[str] ) -> Any: pass @slow def __snake_case ( self : int ) -> str: for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : List[Any] = ViTMAEModel.from_pretrained(lowerCamelCase ) self.assertIsNotNone(lowerCamelCase ) def __lowercase ( ): __snake_case : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class a (unittest.TestCase ): """simple docstring""" @cached_property def __snake_case ( self : List[Any] ) -> List[Any]: return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None @slow def __snake_case ( self : Tuple ) -> Optional[Any]: # make random mask reproducible across the PT and TF model np.random.seed(2 ) __snake_case : Optional[int] = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(lowerCamelCase ) __snake_case : List[Any] = self.default_image_processor __snake_case : str = prepare_img() __snake_case : List[str] = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) __snake_case : Tuple = ViTMAEConfig() __snake_case : Any = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) __snake_case : Optional[Any] = np.random.uniform(size=(1, num_patches) ) # forward pass with torch.no_grad(): __snake_case : int = model(**lowerCamelCase , noise=torch.from_numpy(lowerCamelCase ).to(device=lowerCamelCase ) ) # verify the logits __snake_case : Tuple = torch.Size((1, 196, 768) ) self.assertEqual(outputs.logits.shape , lowerCamelCase ) __snake_case : Any = torch.tensor( [[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(lowerCamelCase ) , atol=1E-4 ) )
355
import importlib import math import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Tuple, Union import flax import jax.numpy as jnp from ..utils import BaseOutput _snake_case : int = "scheduler_config.json" class a (_lowerCAmelCase ): """simple docstring""" __UpperCAmelCase : Tuple = 1 __UpperCAmelCase : Tuple = 2 __UpperCAmelCase : Union[str, Any] = 3 __UpperCAmelCase : List[Any] = 4 __UpperCAmelCase : Tuple = 5 @dataclass class a (_lowerCAmelCase ): """simple docstring""" __UpperCAmelCase : jnp.ndarray class a : """simple docstring""" __UpperCAmelCase : Dict = SCHEDULER_CONFIG_NAME __UpperCAmelCase : Union[str, Any] = ["dtype"] __UpperCAmelCase : Tuple = [] __UpperCAmelCase : int = True @classmethod def __snake_case ( cls : List[str] , lowerCamelCase : Dict[str, Any] = None , lowerCamelCase : Optional[str] = None , lowerCamelCase : List[str]=False , **lowerCamelCase : Union[str, Any] , ) -> List[str]: __snake_case , __snake_case : List[str] = cls.load_config( pretrained_model_name_or_path=lowerCamelCase , subfolder=lowerCamelCase , return_unused_kwargs=lowerCamelCase , **lowerCamelCase , ) __snake_case , __snake_case : Dict = cls.from_config(lowerCamelCase , return_unused_kwargs=lowerCamelCase , **lowerCamelCase ) if hasattr(lowerCamelCase , "create_state" ) and getattr(lowerCamelCase , "has_state" , lowerCamelCase ): __snake_case : Tuple = scheduler.create_state() if return_unused_kwargs: return scheduler, state, unused_kwargs return scheduler, state def __snake_case ( self : Any , lowerCamelCase : Union[str, os.PathLike] , lowerCamelCase : bool = False , **lowerCamelCase : List[Any] ) -> int: self.save_config(save_directory=lowerCamelCase , push_to_hub=lowerCamelCase , **lowerCamelCase ) @property def __snake_case ( self : Tuple ) -> List[Any]: return self._get_compatibles() @classmethod def __snake_case ( cls : int ) -> Dict: __snake_case : Tuple = list(set([cls.__name__] + cls._compatibles ) ) __snake_case : int = importlib.import_module(__name__.split("." )[0] ) __snake_case : Tuple = [ getattr(lowerCamelCase , lowerCamelCase ) for c in compatible_classes_str if hasattr(lowerCamelCase , lowerCamelCase ) ] return compatible_classes def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ): assert len(__lowerCamelCase ) >= x.ndim return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(__lowerCamelCase ) - x.ndim) ) , __lowerCamelCase ) def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase=0.9_9_9 , __lowerCamelCase=jnp.floataa ): def alpha_bar(__lowerCamelCase ): return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 __snake_case : List[Any] = [] for i in range(__lowerCamelCase ): __snake_case : Dict = i / num_diffusion_timesteps __snake_case : str = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar(__lowerCamelCase ) / alpha_bar(__lowerCamelCase ) , __lowerCamelCase ) ) return jnp.array(__lowerCamelCase , dtype=__lowerCamelCase ) @flax.struct.dataclass class a : """simple docstring""" __UpperCAmelCase : jnp.ndarray __UpperCAmelCase : jnp.ndarray __UpperCAmelCase : jnp.ndarray @classmethod def __snake_case ( cls : Union[str, Any] , lowerCamelCase : int ) -> List[Any]: __snake_case : Dict = scheduler.config if config.trained_betas is not None: __snake_case : Dict = jnp.asarray(config.trained_betas , dtype=scheduler.dtype ) elif config.beta_schedule == "linear": __snake_case : Optional[int] = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype ) elif config.beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. __snake_case : str = ( jnp.linspace( config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype ) ** 2 ) elif config.beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule __snake_case : Optional[Any] = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype ) else: raise NotImplementedError( F'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' ) __snake_case : Any = 1.0 - betas __snake_case : int = jnp.cumprod(lowerCamelCase , axis=0 ) return cls( alphas=lowerCamelCase , betas=lowerCamelCase , alphas_cumprod=lowerCamelCase , ) def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): __snake_case : List[Any] = state.alphas_cumprod __snake_case : str = alphas_cumprod[timesteps] ** 0.5 __snake_case : Dict = sqrt_alpha_prod.flatten() __snake_case : str = broadcast_to_shape_from_left(__lowerCamelCase , original_samples.shape ) __snake_case : Tuple = (1 - alphas_cumprod[timesteps]) ** 0.5 __snake_case : str = sqrt_one_minus_alpha_prod.flatten() __snake_case : Tuple = broadcast_to_shape_from_left(__lowerCamelCase , original_samples.shape ) return sqrt_alpha_prod, sqrt_one_minus_alpha_prod def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): __snake_case , __snake_case : Union[str, Any] = get_sqrt_alpha_prod(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) __snake_case : List[str] = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): __snake_case , __snake_case : Dict = get_sqrt_alpha_prod(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) __snake_case : Optional[int] = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity
134
0
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html snake_case__ : List[Any] = '''platform''' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class snake_case_: __UpperCamelCase = PegasusConfig __UpperCamelCase = {} __UpperCamelCase = '''gelu''' def __init__( self : List[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any=1_3 , UpperCamelCase_ : List[Any]=7 , UpperCamelCase_ : Tuple=True , UpperCamelCase_ : List[Any]=False , UpperCamelCase_ : Optional[Any]=9_9 , UpperCamelCase_ : Any=3_2 , UpperCamelCase_ : List[Any]=5 , UpperCamelCase_ : str=4 , UpperCamelCase_ : str=3_7 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Any=2_0 , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : List[str]=1 , UpperCamelCase_ : Any=0 , ): lowerCAmelCase : List[Any] = parent lowerCAmelCase : Optional[int] = batch_size lowerCAmelCase : Any = seq_length lowerCAmelCase : Dict = is_training lowerCAmelCase : Optional[int] = use_labels lowerCAmelCase : Union[str, Any] = vocab_size lowerCAmelCase : Tuple = hidden_size lowerCAmelCase : Any = num_hidden_layers lowerCAmelCase : List[str] = num_attention_heads lowerCAmelCase : Optional[Any] = intermediate_size lowerCAmelCase : Optional[int] = hidden_dropout_prob lowerCAmelCase : List[Any] = attention_probs_dropout_prob lowerCAmelCase : str = max_position_embeddings lowerCAmelCase : str = eos_token_id lowerCAmelCase : List[Any] = pad_token_id lowerCAmelCase : List[str] = bos_token_id def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) lowerCAmelCase : Union[str, Any] = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) lowerCAmelCase : List[str] = np.concatenate([input_ids, eos_tensor] , axis=1 ) lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase : Optional[Any] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) lowerCAmelCase : Dict = prepare_pegasus_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) return config, inputs_dict def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict ): lowerCAmelCase : Any = 2_0 lowerCAmelCase : Any = model_class_name(UpperCamelCase_ ) lowerCAmelCase : List[str] = model.encode(inputs_dict['''input_ids'''] ) lowerCAmelCase, lowerCAmelCase : Optional[Any] = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) lowerCAmelCase : Any = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) lowerCAmelCase : Dict = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowerCAmelCase : Optional[int] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , ) lowerCAmelCase : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) lowerCAmelCase : int = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase_ , ) lowerCAmelCase : List[Any] = model.decode(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Any , UpperCamelCase_ : Dict ): lowerCAmelCase : Dict = 2_0 lowerCAmelCase : Union[str, Any] = model_class_name(UpperCamelCase_ ) lowerCAmelCase : Any = model.encode(inputs_dict['''input_ids'''] ) lowerCAmelCase, lowerCAmelCase : str = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) lowerCAmelCase : Any = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) lowerCAmelCase : Optional[int] = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : int = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowerCAmelCase : List[str] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , ) lowerCAmelCase : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) lowerCAmelCase : Optional[int] = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , ) lowerCAmelCase : List[Any] = model.decode(UpperCamelCase_ , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ ) lowerCAmelCase : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' ) def _snake_case ( _snake_case : Tuple , _snake_case : Dict , _snake_case : Dict , _snake_case : Optional[Any]=None , _snake_case : Dict=None , ): if attention_mask is None: lowerCAmelCase : Tuple = np.not_equal(_snake_case , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: lowerCAmelCase : Dict = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class snake_case_( a__ , unittest.TestCase ): __UpperCamelCase = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) __UpperCamelCase = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () __UpperCamelCase = True __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : Optional[Any] = FlaxPegasusModelTester(self ) lowerCAmelCase : Tuple = ConfigTester(self , config_class=UpperCamelCase_ ) def lowerCamelCase__ ( self : str ): self.config_tester.run_common_tests() def lowerCamelCase__ ( self : Dict ): lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Any ): lowerCAmelCase, lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase, lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCAmelCase : str = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Tuple = model_class(UpperCamelCase_ ) @jax.jit def encode_jitted(UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[int]=None , **UpperCamelCase_ : Tuple ): return model.encode(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ ) with self.subTest('''JIT Enabled''' ): lowerCAmelCase : Tuple = encode_jitted(**UpperCamelCase_ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): lowerCAmelCase : Dict = encode_jitted(**UpperCamelCase_ ).to_tuple() self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) ) for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase, lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCAmelCase : Optional[int] = model_class(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] ) lowerCAmelCase : Any = { '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''], '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''], '''encoder_outputs''': encoder_outputs, } @jax.jit def decode_jitted(UpperCamelCase_ : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] ): return model.decode( decoder_input_ids=UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , encoder_outputs=UpperCamelCase_ , ) with self.subTest('''JIT Enabled''' ): lowerCAmelCase : Optional[Any] = decode_jitted(**UpperCamelCase_ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): lowerCAmelCase : Any = decode_jitted(**UpperCamelCase_ ).to_tuple() self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) ) for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCamelCase__ ( self : str ): for model_class_name in self.all_model_classes: lowerCAmelCase : int = model_class_name.from_pretrained('''google/pegasus-large''' , from_pt=UpperCamelCase_ ) lowerCAmelCase : List[Any] = np.ones((1, 1) ) lowerCAmelCase : str = model(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) @slow def lowerCamelCase__ ( self : int ): lowerCAmelCase : Any = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''' ) lowerCAmelCase : List[Any] = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''' ) lowerCAmelCase : int = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''', ] lowerCAmelCase : str = [ '''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''', '''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''', ] lowerCAmelCase : Optional[Any] = tokenizer(UpperCamelCase_ , return_tensors='''np''' , truncation=UpperCamelCase_ , max_length=5_1_2 , padding=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = model.generate(**UpperCamelCase_ , num_beams=2 ).sequences lowerCAmelCase : Tuple = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) assert tgt_text == decoded
60
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() snake_case__ : int = logging.get_logger(__name__) def _snake_case ( _snake_case : Union[str, Any] ): lowerCAmelCase : Dict = OrderedDict() for key, value in state_dict.items(): if key.startswith('''module.encoder''' ): lowerCAmelCase : Union[str, Any] = key.replace('''module.encoder''' , '''glpn.encoder''' ) if key.startswith('''module.decoder''' ): lowerCAmelCase : str = key.replace('''module.decoder''' , '''decoder.stages''' ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 lowerCAmelCase : Union[str, Any] = key[key.find('''patch_embed''' ) + len('''patch_embed''' )] lowerCAmelCase : str = key.replace(f'''patch_embed{idx}''' , f'''patch_embeddings.{int(_snake_case )-1}''' ) if "norm" in key: lowerCAmelCase : str = key.replace('''norm''' , '''layer_norm''' ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 lowerCAmelCase : Optional[int] = key[key.find('''glpn.encoder.layer_norm''' ) + len('''glpn.encoder.layer_norm''' )] lowerCAmelCase : List[str] = key.replace(f'''layer_norm{idx}''' , f'''layer_norm.{int(_snake_case )-1}''' ) if "layer_norm1" in key: lowerCAmelCase : Union[str, Any] = key.replace('''layer_norm1''' , '''layer_norm_1''' ) if "layer_norm2" in key: lowerCAmelCase : Any = key.replace('''layer_norm2''' , '''layer_norm_2''' ) if "block" in key: # replace for example block1 by block.0 lowerCAmelCase : Tuple = key[key.find('''block''' ) + len('''block''' )] lowerCAmelCase : Tuple = key.replace(f'''block{idx}''' , f'''block.{int(_snake_case )-1}''' ) if "attn.q" in key: lowerCAmelCase : Optional[Any] = key.replace('''attn.q''' , '''attention.self.query''' ) if "attn.proj" in key: lowerCAmelCase : Dict = key.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in key: lowerCAmelCase : List[str] = key.replace('''attn''' , '''attention.self''' ) if "fc1" in key: lowerCAmelCase : List[Any] = key.replace('''fc1''' , '''dense1''' ) if "fc2" in key: lowerCAmelCase : Optional[Any] = key.replace('''fc2''' , '''dense2''' ) if "linear_pred" in key: lowerCAmelCase : List[Any] = key.replace('''linear_pred''' , '''classifier''' ) if "linear_fuse" in key: lowerCAmelCase : Optional[Any] = key.replace('''linear_fuse.conv''' , '''linear_fuse''' ) lowerCAmelCase : int = key.replace('''linear_fuse.bn''' , '''batch_norm''' ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 lowerCAmelCase : Optional[Any] = key[key.find('''linear_c''' ) + len('''linear_c''' )] lowerCAmelCase : int = key.replace(f'''linear_c{idx}''' , f'''linear_c.{int(_snake_case )-1}''' ) if "bot_conv" in key: lowerCAmelCase : str = key.replace('''bot_conv''' , '''0.convolution''' ) if "skip_conv1" in key: lowerCAmelCase : int = key.replace('''skip_conv1''' , '''1.convolution''' ) if "skip_conv2" in key: lowerCAmelCase : str = key.replace('''skip_conv2''' , '''2.convolution''' ) if "fusion1" in key: lowerCAmelCase : Union[str, Any] = key.replace('''fusion1''' , '''1.fusion''' ) if "fusion2" in key: lowerCAmelCase : Any = key.replace('''fusion2''' , '''2.fusion''' ) if "fusion3" in key: lowerCAmelCase : List[Any] = key.replace('''fusion3''' , '''3.fusion''' ) if "fusion" in key and "conv" in key: lowerCAmelCase : Union[str, Any] = key.replace('''conv''' , '''convolutional_layer''' ) if key.startswith('''module.last_layer_depth''' ): lowerCAmelCase : Optional[Any] = key.replace('''module.last_layer_depth''' , '''head.head''' ) lowerCAmelCase : Union[str, Any] = value return new_state_dict def _snake_case ( _snake_case : Optional[Any] , _snake_case : str ): # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) lowerCAmelCase : int = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' ) lowerCAmelCase : Optional[int] = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' ) # next, add keys and values (in that order) to the state dict lowerCAmelCase : str = kv_weight[ : config.hidden_sizes[i], : ] lowerCAmelCase : Union[str, Any] = kv_bias[: config.hidden_sizes[i]] lowerCAmelCase : Dict = kv_weight[ config.hidden_sizes[i] :, : ] lowerCAmelCase : List[str] = kv_bias[config.hidden_sizes[i] :] def _snake_case ( ): lowerCAmelCase : int = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCAmelCase : str = Image.open(requests.get(_snake_case , stream=_snake_case ).raw ) return image @torch.no_grad() def _snake_case ( _snake_case : Dict , _snake_case : Dict , _snake_case : Union[str, Any]=False , _snake_case : List[str]=None ): lowerCAmelCase : Optional[int] = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) lowerCAmelCase : Union[str, Any] = GLPNImageProcessor() # prepare image lowerCAmelCase : Tuple = prepare_img() lowerCAmelCase : Dict = image_processor(images=_snake_case , return_tensors='''pt''' ).pixel_values logger.info('''Converting model...''' ) # load original state dict lowerCAmelCase : List[str] = torch.load(_snake_case , map_location=torch.device('''cpu''' ) ) # rename keys lowerCAmelCase : Tuple = rename_keys(_snake_case ) # key and value matrices need special treatment read_in_k_v(_snake_case , _snake_case ) # create HuggingFace model and load state dict lowerCAmelCase : str = GLPNForDepthEstimation(_snake_case ) model.load_state_dict(_snake_case ) model.eval() # forward pass lowerCAmelCase : Union[str, Any] = model(_snake_case ) lowerCAmelCase : int = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: lowerCAmelCase : str = torch.tensor( [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] ) elif "kitti" in model_name: lowerCAmelCase : str = torch.tensor( [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] ) else: raise ValueError(f'''Unknown model name: {model_name}''' ) lowerCAmelCase : List[Any] = torch.Size([1, 480, 640] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , _snake_case , atol=1E-4 ) print('''Looks ok!''' ) # finally, push to hub if required if push_to_hub: logger.info('''Pushing model and image processor to the hub...''' ) model.push_to_hub( repo_path_or_name=Path(_snake_case , _snake_case ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=_snake_case , ) image_processor.push_to_hub( repo_path_or_name=Path(_snake_case , _snake_case ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=_snake_case , ) if __name__ == "__main__": snake_case__ : Tuple = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.''' ) parser.add_argument( '''--model_name''', default='''glpn-kitti''', type=str, help='''Name of the model in case you\'re pushing to the hub.''', ) snake_case__ : List[str] = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
60
1
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
270
'''simple docstring''' import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin __lowerCAmelCase = 1E-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class _lowerCAmelCase : '''simple docstring''' def __init__(self , UpperCAmelCase , UpperCAmelCase=16 , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=14 , UpperCAmelCase=10 , UpperCAmelCase=19 , UpperCAmelCase=5 , UpperCAmelCase=4 , UpperCAmelCase=True , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=4 , UpperCAmelCase=4 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=[1, 2, 3, 4, 5] , UpperCAmelCase=25 , UpperCAmelCase=5 , ) -> int: _snake_case = d_model _snake_case = parent _snake_case = batch_size _snake_case = prediction_length _snake_case = context_length _snake_case = cardinality _snake_case = num_time_features _snake_case = lags_sequence _snake_case = embedding_dimension _snake_case = is_training _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = intermediate_size _snake_case = hidden_act _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = context_length _snake_case = prediction_length + label_length _snake_case = label_length _snake_case = moving_average _snake_case = autocorrelation_factor def lowercase (self ) -> str: return AutoformerConfig( d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , ) def lowercase (self , UpperCAmelCase ) -> Tuple: _snake_case = config.context_length + max(config.lags_sequence ) _snake_case = ids_tensor([self.batch_size, 1] , config.cardinality[0] ) _snake_case = floats_tensor([self.batch_size, _past_length, config.num_time_features] ) _snake_case = floats_tensor([self.batch_size, _past_length] ) _snake_case = floats_tensor([self.batch_size, _past_length] ) > 0.5 # decoder inputs _snake_case = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] ) _snake_case = floats_tensor([self.batch_size, config.prediction_length] ) _snake_case = { """past_values""": past_values, """static_categorical_features""": static_categorical_features, """past_time_features""": past_time_features, """past_observed_mask""": past_observed_mask, """future_time_features""": future_time_features, """future_values""": future_values, } return inputs_dict def lowercase (self ) -> int: _snake_case = self.get_config() _snake_case = self.prepare_autoformer_inputs_dict(UpperCAmelCase ) return config, inputs_dict def lowercase (self ) -> List[Any]: _snake_case, _snake_case = self.prepare_config_and_inputs() return config, inputs_dict def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> Tuple: _snake_case = AutoformerModel(config=UpperCAmelCase ).to(UpperCAmelCase ).eval() _snake_case = model(**UpperCAmelCase ) _snake_case = outputs.encoder_last_hidden_state _snake_case = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: _snake_case = model.get_encoder() encoder.save_pretrained(UpperCAmelCase ) _snake_case = AutoformerEncoder.from_pretrained(UpperCAmelCase ).to(UpperCAmelCase ) _snake_case, _snake_case, _snake_case, _snake_case, _snake_case = model.create_network_inputs(**UpperCAmelCase ) _snake_case, _snake_case = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] ) _snake_case = torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , ) _snake_case = encoder(inputs_embeds=UpperCAmelCase )[0] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 ) _snake_case = ( torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 ) .unsqueeze(1 ) .repeat(1 , config.prediction_length , 1 ) ) _snake_case = torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , ) _snake_case = torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) _snake_case = torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) with tempfile.TemporaryDirectory() as tmpdirname: _snake_case = model.get_decoder() decoder.save_pretrained(UpperCAmelCase ) _snake_case = AutoformerDecoder.from_pretrained(UpperCAmelCase ).to(UpperCAmelCase ) _snake_case = decoder( trend=UpperCAmelCase , inputs_embeds=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 ) @require_torch class _lowerCAmelCase ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else () lowerCAmelCase_ = (AutoformerForPrediction,) if is_torch_available() else () lowerCAmelCase_ = {"feature-extraction": AutoformerModel} if is_torch_available() else {} lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False def lowercase (self ) -> List[Any]: _snake_case = AutoformerModelTester(self ) _snake_case = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase ) def lowercase (self ) -> List[Any]: self.config_tester.run_common_tests() def lowercase (self ) -> Any: _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: _snake_case = model_class(UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCAmelCase ) _snake_case, _snake_case = model_class.from_pretrained(UpperCAmelCase , output_loading_info=UpperCAmelCase ) self.assertEqual(info["""missing_keys"""] , [] ) def lowercase (self ) -> List[Any]: _snake_case = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*UpperCAmelCase ) @unittest.skip(reason="""Model has no tokens embeddings""" ) def lowercase (self ) -> Tuple: pass def lowercase (self ) -> Any: _snake_case = inspect.signature(getattr(UpperCAmelCase , """forward""" ) ) # The main input is the name of the argument after `self` _snake_case = list(model_signature.parameters.keys() )[1] self.assertEqual(AutoformerModel.main_input_name , UpperCAmelCase ) def lowercase (self ) -> List[Any]: _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(UpperCAmelCase ) _snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case = [*signature.parameters.keys()] _snake_case = [ """past_values""", """past_time_features""", """past_observed_mask""", """static_categorical_features""", """static_real_features""", """future_values""", """future_time_features""", ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append("""future_observed_mask""" ) expected_arg_names.extend( [ """decoder_attention_mask""", """head_mask""", """decoder_head_mask""", """cross_attn_head_mask""", """encoder_outputs""", """past_key_values""", """output_hidden_states""", """output_attentions""", """use_cache""", """return_dict""", ] ) self.assertListEqual(arg_names[: len(UpperCAmelCase )] , UpperCAmelCase ) def lowercase (self ) -> List[Any]: _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() _snake_case = True _snake_case = getattr(self.model_tester , """seq_length""" , UpperCAmelCase ) _snake_case = getattr(self.model_tester , """decoder_seq_length""" , UpperCAmelCase ) _snake_case = getattr(self.model_tester , """encoder_seq_length""" , UpperCAmelCase ) _snake_case = getattr(self.model_tester , """d_model""" , UpperCAmelCase ) _snake_case = getattr(self.model_tester , """num_attention_heads""" , UpperCAmelCase ) _snake_case = d_model // num_attention_heads for model_class in self.all_model_classes: _snake_case = True _snake_case = False _snake_case = True _snake_case = model_class(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() with torch.no_grad(): _snake_case = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) _snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _snake_case = True _snake_case = model_class(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() with torch.no_grad(): _snake_case = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) _snake_case = outputs.encoder_attentions self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) _snake_case = len(UpperCAmelCase ) _snake_case = 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "trend" in outputs: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(UpperCAmelCase , UpperCAmelCase ) # decoder attentions _snake_case = outputs.decoder_attentions self.assertIsInstance(UpperCAmelCase , (list, tuple) ) self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # cross attentions _snake_case = outputs.cross_attentions self.assertIsInstance(UpperCAmelCase , (list, tuple) ) self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # Check attention is always last and order is fine _snake_case = True _snake_case = True _snake_case = model_class(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() with torch.no_grad(): _snake_case = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) self.assertEqual(out_len + 2 , len(UpperCAmelCase ) ) _snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) @is_flaky() def lowercase (self ) -> List[Any]: super().test_retain_grad_hidden_states_attentions() def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE="train-batch.pt" ): _snake_case = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) _snake_case = torch.load(_SCREAMING_SNAKE_CASE , map_location=_SCREAMING_SNAKE_CASE ) return batch @require_torch @slow class _lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowercase (self ) -> Union[str, Any]: _snake_case = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(UpperCAmelCase ) _snake_case = prepare_batch() with torch.no_grad(): _snake_case = model( past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0] _snake_case = torch.Size( (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) ) self.assertEqual(output.shape , UpperCAmelCase ) _snake_case = torch.tensor( [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=UpperCAmelCase ) self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) ) def lowercase (self ) -> str: _snake_case = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(UpperCAmelCase ) _snake_case = prepare_batch("""val-batch.pt""" ) with torch.no_grad(): _snake_case = model( past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state _snake_case = torch.Size((64, model.config.context_length, model.config.d_model) ) self.assertEqual(output.shape , UpperCAmelCase ) _snake_case = torch.tensor( [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=UpperCAmelCase ) self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) ) def lowercase (self ) -> Optional[int]: _snake_case = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(UpperCAmelCase ) _snake_case = prepare_batch("""val-batch.pt""" ) with torch.no_grad(): _snake_case = model.generate( static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , ) _snake_case = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) ) self.assertEqual(outputs.sequences.shape , UpperCAmelCase ) _snake_case = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=UpperCAmelCase ) _snake_case = outputs.sequences.mean(dim=1 ) self.assertTrue(torch.allclose(mean_prediction[0, -3:] , UpperCAmelCase , rtol=1e-1 ) )
270
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A_ : Optional[Any] = logging.get_logger(__name__) A_ : Union[str, Any] = { 'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json', 'distilbert-base-uncased-distilled-squad': ( 'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json' ), 'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json', 'distilbert-base-cased-distilled-squad': ( 'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json' ), 'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json', 'distilbert-base-multilingual-cased': ( 'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json' ), 'distilbert-base-uncased-finetuned-sst-2-english': ( 'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json' ), } class A_ ( _a ): '''simple docstring''' a__ = "distilbert" a__ = { "hidden_size": "dim", "num_attention_heads": "n_heads", "num_hidden_layers": "n_layers", } def __init__(self , lowercase__=30_522 , lowercase__=512 , lowercase__=False , lowercase__=6 , lowercase__=12 , lowercase__=768 , lowercase__=4 * 768 , lowercase__=0.1 , lowercase__=0.1 , lowercase__="gelu" , lowercase__=0.02 , lowercase__=0.1 , lowercase__=0.2 , lowercase__=0 , **lowercase__ , ) -> List[Any]: __UpperCAmelCase = vocab_size __UpperCAmelCase = max_position_embeddings __UpperCAmelCase = sinusoidal_pos_embds __UpperCAmelCase = n_layers __UpperCAmelCase = n_heads __UpperCAmelCase = dim __UpperCAmelCase = hidden_dim __UpperCAmelCase = dropout __UpperCAmelCase = attention_dropout __UpperCAmelCase = activation __UpperCAmelCase = initializer_range __UpperCAmelCase = qa_dropout __UpperCAmelCase = seq_classif_dropout super().__init__(**lowercase__ , pad_token_id=lowercase__ ) class A_ ( _a ): '''simple docstring''' @property def lowerCAmelCase_ (self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": __UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __UpperCAmelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
333
def __a ( ) -> list[list[int]]: '''simple docstring''' return [list(range(1_0_0_0 - i , -1_0_0_0 - i , -1 ) ) for i in range(1_0_0_0 )] A_ : Union[str, Any] = generate_large_matrix() A_ : Union[str, Any] = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def __a ( SCREAMING_SNAKE_CASE ) -> None: '''simple docstring''' assert all(row == sorted(SCREAMING_SNAKE_CASE , reverse=SCREAMING_SNAKE_CASE ) for row in grid ) assert all(list(SCREAMING_SNAKE_CASE ) == sorted(SCREAMING_SNAKE_CASE , reverse=SCREAMING_SNAKE_CASE ) for col in zip(*SCREAMING_SNAKE_CASE ) ) def __a ( SCREAMING_SNAKE_CASE ) -> int: '''simple docstring''' __UpperCAmelCase = 0 __UpperCAmelCase = len(SCREAMING_SNAKE_CASE ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: __UpperCAmelCase = (left + right) // 2 __UpperCAmelCase = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: __UpperCAmelCase = mid + 1 else: __UpperCAmelCase = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(SCREAMING_SNAKE_CASE ) def __a ( SCREAMING_SNAKE_CASE ) -> int: '''simple docstring''' __UpperCAmelCase = 0 __UpperCAmelCase = len(grid[0] ) for i in range(len(SCREAMING_SNAKE_CASE ) ): __UpperCAmelCase = find_negative_index(grid[i][:bound] ) total += bound return (len(SCREAMING_SNAKE_CASE ) * len(grid[0] )) - total def __a ( SCREAMING_SNAKE_CASE ) -> int: '''simple docstring''' return len([number for row in grid for number in row if number < 0] ) def __a ( SCREAMING_SNAKE_CASE ) -> int: '''simple docstring''' __UpperCAmelCase = 0 for row in grid: for i, number in enumerate(SCREAMING_SNAKE_CASE ): if number < 0: total += len(SCREAMING_SNAKE_CASE ) - i break return total def __a ( ) -> None: '''simple docstring''' from timeit import timeit print('''Running benchmarks''' ) __UpperCAmelCase = ( '''from __main__ import count_negatives_binary_search, ''' '''count_negatives_brute_force, count_negatives_brute_force_with_break, grid''' ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): __UpperCAmelCase = timeit(f'''{func}(grid=grid)''' , setup=SCREAMING_SNAKE_CASE , number=5_0_0 ) print(f'''{func}() took {time:0.4f} seconds''' ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
333
1
import PIL.Image import PIL.ImageOps from packaging import version from PIL import Image if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''): __lowerCamelCase : Optional[Any] = { '''linear''': PIL.Image.Resampling.BILINEAR, '''bilinear''': PIL.Image.Resampling.BILINEAR, '''bicubic''': PIL.Image.Resampling.BICUBIC, '''lanczos''': PIL.Image.Resampling.LANCZOS, '''nearest''': PIL.Image.Resampling.NEAREST, } else: __lowerCamelCase : int = { '''linear''': PIL.Image.LINEAR, '''bilinear''': PIL.Image.BILINEAR, '''bicubic''': PIL.Image.BICUBIC, '''lanczos''': PIL.Image.LANCZOS, '''nearest''': PIL.Image.NEAREST, } def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = (images / 2 + 0.5).clamp(0 , 1 ) SCREAMING_SNAKE_CASE__ = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() SCREAMING_SNAKE_CASE__ = numpy_to_pil(__UpperCamelCase ) return images def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> int: """simple docstring""" if images.ndim == 3: SCREAMING_SNAKE_CASE__ = images[None, ...] SCREAMING_SNAKE_CASE__ = (images * 2_55).round().astype("""uint8""" ) if images.shape[-1] == 1: # special case for grayscale (single channel) images SCREAMING_SNAKE_CASE__ = [Image.fromarray(image.squeeze() , mode="""L""" ) for image in images] else: SCREAMING_SNAKE_CASE__ = [Image.fromarray(__UpperCamelCase ) for image in images] return pil_images
204
from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __snake_case : def __init__( self : Any , _lowercase : Tuple , _lowercase : str=2 , _lowercase : List[Any]=3 , _lowercase : Optional[Any]=4 , _lowercase : Optional[Any]=2 , _lowercase : str=7 , _lowercase : Dict=True , _lowercase : List[str]=True , _lowercase : Union[str, Any]=True , _lowercase : Optional[int]=True , _lowercase : Dict=99 , _lowercase : Dict=36 , _lowercase : Tuple=2 , _lowercase : Optional[int]=4 , _lowercase : int=37 , _lowercase : Tuple="gelu" , _lowercase : Optional[Any]=0.1 , _lowercase : Tuple=0.1 , _lowercase : str=5_12 , _lowercase : Dict=16 , _lowercase : int=2 , _lowercase : int=0.02 , _lowercase : Any=6 , _lowercase : List[Any]=6 , _lowercase : List[Any]=3 , _lowercase : List[Any]=4 , _lowercase : int=None , _lowercase : Optional[int]=10_00 , ): """simple docstring""" SCREAMING_SNAKE_CASE__ = parent SCREAMING_SNAKE_CASE__ = batch_size SCREAMING_SNAKE_CASE__ = num_channels SCREAMING_SNAKE_CASE__ = image_size SCREAMING_SNAKE_CASE__ = patch_size SCREAMING_SNAKE_CASE__ = is_training SCREAMING_SNAKE_CASE__ = use_input_mask SCREAMING_SNAKE_CASE__ = use_token_type_ids SCREAMING_SNAKE_CASE__ = use_labels SCREAMING_SNAKE_CASE__ = vocab_size SCREAMING_SNAKE_CASE__ = hidden_size SCREAMING_SNAKE_CASE__ = num_hidden_layers SCREAMING_SNAKE_CASE__ = num_attention_heads SCREAMING_SNAKE_CASE__ = intermediate_size SCREAMING_SNAKE_CASE__ = hidden_act SCREAMING_SNAKE_CASE__ = hidden_dropout_prob SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ = max_position_embeddings SCREAMING_SNAKE_CASE__ = type_vocab_size SCREAMING_SNAKE_CASE__ = type_sequence_label_size SCREAMING_SNAKE_CASE__ = initializer_range SCREAMING_SNAKE_CASE__ = coordinate_size SCREAMING_SNAKE_CASE__ = shape_size SCREAMING_SNAKE_CASE__ = num_labels SCREAMING_SNAKE_CASE__ = num_choices SCREAMING_SNAKE_CASE__ = scope SCREAMING_SNAKE_CASE__ = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) SCREAMING_SNAKE_CASE__ = text_seq_length SCREAMING_SNAKE_CASE__ = (image_size // patch_size) ** 2 + 1 SCREAMING_SNAKE_CASE__ = self.text_seq_length + self.image_seq_length def __a ( self : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) SCREAMING_SNAKE_CASE__ = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: SCREAMING_SNAKE_CASE__ = bbox[i, j, 3] SCREAMING_SNAKE_CASE__ = bbox[i, j, 1] SCREAMING_SNAKE_CASE__ = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: SCREAMING_SNAKE_CASE__ = bbox[i, j, 2] SCREAMING_SNAKE_CASE__ = bbox[i, j, 0] SCREAMING_SNAKE_CASE__ = tmp_coordinate SCREAMING_SNAKE_CASE__ = tf.constant(_lowercase ) SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ = None if self.use_input_mask: SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.text_seq_length] ) SCREAMING_SNAKE_CASE__ = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None if self.use_labels: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE__ = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def __a ( self : List[str] , _lowercase : Dict , _lowercase : List[Any] , _lowercase : str , _lowercase : Optional[int] , _lowercase : Union[str, Any] , _lowercase : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE__ = TFLayoutLMvaModel(config=_lowercase ) # text + image SCREAMING_SNAKE_CASE__ = model(_lowercase , pixel_values=_lowercase , training=_lowercase ) SCREAMING_SNAKE_CASE__ = model( _lowercase , bbox=_lowercase , pixel_values=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , training=_lowercase , ) SCREAMING_SNAKE_CASE__ = model(_lowercase , bbox=_lowercase , pixel_values=_lowercase , training=_lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only SCREAMING_SNAKE_CASE__ = model(_lowercase , training=_lowercase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only SCREAMING_SNAKE_CASE__ = model({"""pixel_values""": pixel_values} , training=_lowercase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def __a ( self : int , _lowercase : int , _lowercase : Optional[int] , _lowercase : Optional[Any] , _lowercase : Optional[int] , _lowercase : Tuple , _lowercase : List[Any] , _lowercase : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE__ = self.num_labels SCREAMING_SNAKE_CASE__ = TFLayoutLMvaForSequenceClassification(config=_lowercase ) SCREAMING_SNAKE_CASE__ = model( _lowercase , bbox=_lowercase , pixel_values=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , training=_lowercase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self : Any , _lowercase : Dict , _lowercase : Tuple , _lowercase : int , _lowercase : Optional[int] , _lowercase : Optional[int] , _lowercase : Union[str, Any] , _lowercase : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE__ = self.num_labels SCREAMING_SNAKE_CASE__ = TFLayoutLMvaForTokenClassification(config=_lowercase ) SCREAMING_SNAKE_CASE__ = model( _lowercase , bbox=_lowercase , pixel_values=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , training=_lowercase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def __a ( self : str , _lowercase : int , _lowercase : List[str] , _lowercase : str , _lowercase : str , _lowercase : List[str] , _lowercase : List[str] , _lowercase : Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = 2 SCREAMING_SNAKE_CASE__ = TFLayoutLMvaForQuestionAnswering(config=_lowercase ) SCREAMING_SNAKE_CASE__ = model( _lowercase , bbox=_lowercase , pixel_values=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , start_positions=_lowercase , end_positions=_lowercase , training=_lowercase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs() ((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) = config_and_inputs SCREAMING_SNAKE_CASE__ = { """input_ids""": input_ids, """bbox""": bbox, """pixel_values""": pixel_values, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_tf class __snake_case ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ): lowerCAmelCase_ = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) lowerCAmelCase_ = ( {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel} if is_tf_available() else {} ) lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False def __a ( self : Union[str, Any] , _lowercase : Optional[int] , _lowercase : List[str] , _lowercase : Optional[Any] , _lowercase : Optional[int] , _lowercase : List[Any] ): """simple docstring""" return True def __a ( self : List[str] , _lowercase : List[Any] , _lowercase : str , _lowercase : str=False ): """simple docstring""" SCREAMING_SNAKE_CASE__ = copy.deepcopy(_lowercase ) if model_class in get_values(_lowercase ): SCREAMING_SNAKE_CASE__ = { k: tf.tile(tf.expand_dims(_lowercase , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(_lowercase , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(_lowercase ): SCREAMING_SNAKE_CASE__ = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(_lowercase ): SCREAMING_SNAKE_CASE__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) SCREAMING_SNAKE_CASE__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(_lowercase ): SCREAMING_SNAKE_CASE__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(_lowercase ): SCREAMING_SNAKE_CASE__ = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def __a ( self : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE__ = TFLayoutLMvaModelTester(self ) SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=_lowercase , hidden_size=37 ) def __a ( self : Any ): """simple docstring""" self.config_tester.run_common_tests() def __a ( self : Dict ): """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ = model_class(_lowercase ) if getattr(_lowercase , """hf_compute_loss""" , _lowercase ): # The number of elements in the loss should be the same as the number of elements in the label SCREAMING_SNAKE_CASE__ = self._prepare_for_class(inputs_dict.copy() , _lowercase , return_labels=_lowercase ) SCREAMING_SNAKE_CASE__ = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=_lowercase )[0] ] SCREAMING_SNAKE_CASE__ = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs SCREAMING_SNAKE_CASE__ = self._prepare_for_class(inputs_dict.copy() , _lowercase , return_labels=_lowercase ) SCREAMING_SNAKE_CASE__ = prepared_for_class.pop("""input_ids""" ) SCREAMING_SNAKE_CASE__ = model(_lowercase , **_lowercase )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions SCREAMING_SNAKE_CASE__ = self._prepare_for_class(inputs_dict.copy() , _lowercase , return_labels=_lowercase ) SCREAMING_SNAKE_CASE__ = prepared_for_class.pop("""input_ids""" ) if "labels" in prepared_for_class: SCREAMING_SNAKE_CASE__ = prepared_for_class["""labels"""].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: SCREAMING_SNAKE_CASE__ = -1_00 SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor(_lowercase ) SCREAMING_SNAKE_CASE__ = model(_lowercase , **_lowercase )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict SCREAMING_SNAKE_CASE__ = self._prepare_for_class(inputs_dict.copy() , _lowercase , return_labels=_lowercase ) SCREAMING_SNAKE_CASE__ = model(_lowercase )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple SCREAMING_SNAKE_CASE__ = self._prepare_for_class(inputs_dict.copy() , _lowercase , return_labels=_lowercase ) # Get keys that were added with the _prepare_for_class function SCREAMING_SNAKE_CASE__ = prepared_for_class.keys() - inputs_dict.keys() SCREAMING_SNAKE_CASE__ = inspect.signature(model.call ).parameters SCREAMING_SNAKE_CASE__ = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple SCREAMING_SNAKE_CASE__ = {0: """input_ids"""} for label_key in label_keys: SCREAMING_SNAKE_CASE__ = signature_names.index(_lowercase ) SCREAMING_SNAKE_CASE__ = label_key SCREAMING_SNAKE_CASE__ = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple SCREAMING_SNAKE_CASE__ = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: SCREAMING_SNAKE_CASE__ = prepared_for_class[value] SCREAMING_SNAKE_CASE__ = tuple(_lowercase ) # Send to model SCREAMING_SNAKE_CASE__ = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def __a ( self : Optional[int] ): """simple docstring""" ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) def __a ( self : int ): """simple docstring""" ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: SCREAMING_SNAKE_CASE__ = type self.model_tester.create_and_check_model(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) def __a ( self : int ): """simple docstring""" ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) def __a ( self : str ): """simple docstring""" ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) def __a ( self : List[str] ): """simple docstring""" ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) @slow def __a ( self : Tuple ): """simple docstring""" for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ = TFLayoutLMvaModel.from_pretrained(_lowercase ) self.assertIsNotNone(_lowercase ) def __SCREAMING_SNAKE_CASE ( ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf class __snake_case ( unittest.TestCase ): @cached_property def __a ( self : Optional[int] ): """simple docstring""" return LayoutLMvaImageProcessor(apply_ocr=_lowercase ) if is_vision_available() else None @slow def __a ( self : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE__ = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ) SCREAMING_SNAKE_CASE__ = self.default_image_processor SCREAMING_SNAKE_CASE__ = prepare_img() SCREAMING_SNAKE_CASE__ = image_processor(images=_lowercase , return_tensors="""tf""" ).pixel_values SCREAMING_SNAKE_CASE__ = tf.constant([[1, 2]] ) SCREAMING_SNAKE_CASE__ = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass SCREAMING_SNAKE_CASE__ = model(input_ids=_lowercase , bbox=_lowercase , pixel_values=_lowercase , training=_lowercase ) # verify the logits SCREAMING_SNAKE_CASE__ = (1, 1_99, 7_68) self.assertEqual(outputs.last_hidden_state.shape , _lowercase ) SCREAMING_SNAKE_CASE__ = tf.constant( [[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _lowercase , atol=1E-4 ) )
204
1
'''simple docstring''' import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() A =logging.get_logger(__name__) def snake_case_ (_a : Dict , _a : Dict , _a : Optional[int] , _a : str ): UpperCAmelCase = original_name.split('''.''' )[0] UpperCAmelCase = key.split('''.''' ) UpperCAmelCase = int(key_list[key_list.index(__SCREAMING_SNAKE_CASE ) - 2] ) UpperCAmelCase = int(key_list[key_list.index(__SCREAMING_SNAKE_CASE ) - 1] ) UpperCAmelCase = orig_block_num - offset UpperCAmelCase = key.replace(F"{orig_block_num}.{layer_num}.{original_name}" , F"block.{new_block_num}.{layer_num}.{new_name}" ) return key def snake_case_ (_a : Any ): UpperCAmelCase = OrderedDict() UpperCAmelCase , UpperCAmelCase = 0, 0 for key, value in state_dict.items(): if key.startswith('''network''' ): UpperCAmelCase = key.replace('''network''' , '''poolformer.encoder''' ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith('''bias''' ) and "patch_embed" not in key: patch_emb_offset += 1 UpperCAmelCase = key[: key.find('''proj''' )] UpperCAmelCase = key.replace(__SCREAMING_SNAKE_CASE , F"patch_embeddings.{total_embed_found}." ) UpperCAmelCase = key.replace('''proj''' , '''projection''' ) if key.endswith('''bias''' ): total_embed_found += 1 if "patch_embeddings" in key: UpperCAmelCase = '''poolformer.encoder.''' + key if "mlp.fc1" in key: UpperCAmelCase = replace_key_with_offset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''mlp.fc1''' , '''output.conv1''' ) if "mlp.fc2" in key: UpperCAmelCase = replace_key_with_offset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''mlp.fc2''' , '''output.conv2''' ) if "norm1" in key: UpperCAmelCase = replace_key_with_offset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''norm1''' , '''before_norm''' ) if "norm2" in key: UpperCAmelCase = replace_key_with_offset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''norm2''' , '''after_norm''' ) if "layer_scale_1" in key: UpperCAmelCase = replace_key_with_offset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''layer_scale_1''' , '''layer_scale_1''' ) if "layer_scale_2" in key: UpperCAmelCase = replace_key_with_offset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''layer_scale_2''' , '''layer_scale_2''' ) if "head" in key: UpperCAmelCase = key.replace('''head''' , '''classifier''' ) UpperCAmelCase = value return new_state_dict def snake_case_ (): UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCAmelCase = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ) return image @torch.no_grad() def snake_case_ (_a : Tuple , _a : Any , _a : str ): UpperCAmelCase = PoolFormerConfig() # set attributes based on model_name UpperCAmelCase = '''huggingface/label-files''' UpperCAmelCase = model_name[-3:] UpperCAmelCase = 1_0_0_0 UpperCAmelCase = '''imagenet-1k-id2label.json''' UpperCAmelCase = (1, 1_0_0_0) # set config attributes UpperCAmelCase = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} UpperCAmelCase = idalabel UpperCAmelCase = {v: k for k, v in idalabel.items()} if size == "s12": UpperCAmelCase = [2, 2, 6, 2] UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] UpperCAmelCase = 4.0 UpperCAmelCase = 0.9 elif size == "s24": UpperCAmelCase = [4, 4, 1_2, 4] UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] UpperCAmelCase = 4.0 UpperCAmelCase = 0.9 elif size == "s36": UpperCAmelCase = [6, 6, 1_8, 6] UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] UpperCAmelCase = 4.0 UpperCAmelCase = 1E-6 UpperCAmelCase = 0.9 elif size == "m36": UpperCAmelCase = [6, 6, 1_8, 6] UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8] UpperCAmelCase = 4.0 UpperCAmelCase = 1E-6 UpperCAmelCase = 0.95 elif size == "m48": UpperCAmelCase = [8, 8, 2_4, 8] UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8] UpperCAmelCase = 4.0 UpperCAmelCase = 1E-6 UpperCAmelCase = 0.95 else: raise ValueError(F"Size {size} not supported" ) # load image processor UpperCAmelCase = PoolFormerImageProcessor(crop_pct=__SCREAMING_SNAKE_CASE ) # Prepare image UpperCAmelCase = prepare_img() UpperCAmelCase = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values logger.info(F"Converting model {model_name}..." ) # load original state dict UpperCAmelCase = torch.load(__SCREAMING_SNAKE_CASE , map_location=torch.device('''cpu''' ) ) # rename keys UpperCAmelCase = rename_keys(__SCREAMING_SNAKE_CASE ) # create HuggingFace model and load state dict UpperCAmelCase = PoolFormerForImageClassification(__SCREAMING_SNAKE_CASE ) model.load_state_dict(__SCREAMING_SNAKE_CASE ) model.eval() # Define image processor UpperCAmelCase = PoolFormerImageProcessor(crop_pct=__SCREAMING_SNAKE_CASE ) UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values # forward pass UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ) UpperCAmelCase = outputs.logits # define expected logit slices for different models if size == "s12": UpperCAmelCase = torch.tensor([-0.3045, -0.6758, -0.4869] ) elif size == "s24": UpperCAmelCase = torch.tensor([0.4402, -0.1374, -0.8045] ) elif size == "s36": UpperCAmelCase = torch.tensor([-0.6080, -0.5133, -0.5898] ) elif size == "m36": UpperCAmelCase = torch.tensor([0.3952, 0.2263, -1.2668] ) elif size == "m48": UpperCAmelCase = torch.tensor([0.1167, -0.0656, -0.3423] ) else: raise ValueError(F"Size {size} not supported" ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-2 ) # finally, save model and image processor logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." ) Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": A =argparse.ArgumentParser() parser.add_argument( '--model_name', default='poolformer_s12', type=str, help='Name of the model you\'d like to convert.', ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) A =parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
34
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter UpperCAmelCase = '''Create a default config file for Accelerate with only a few flags set.''' def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE="no" , __SCREAMING_SNAKE_CASE = default_json_config_file , __SCREAMING_SNAKE_CASE = False ): lowercase = Path(__SCREAMING_SNAKE_CASE ) path.parent.mkdir(parents=__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE ) if path.exists(): print( F'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' ) return False lowercase = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( F'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' ) lowercase = { 'compute_environment': 'LOCAL_MACHINE', 'mixed_precision': mixed_precision, } if torch.cuda.is_available(): lowercase = torch.cuda.device_count() lowercase = num_gpus lowercase = False if num_gpus > 1: lowercase = 'MULTI_GPU' else: lowercase = 'NO' elif is_xpu_available() and use_xpu: lowercase = torch.xpu.device_count() lowercase = num_xpus lowercase = False if num_xpus > 1: lowercase = 'MULTI_XPU' else: lowercase = 'NO' elif is_npu_available(): lowercase = torch.npu.device_count() lowercase = num_npus lowercase = False if num_npus > 1: lowercase = 'MULTI_NPU' else: lowercase = 'NO' else: lowercase = 0 lowercase = True lowercase = 1 lowercase = 'NO' lowercase = ClusterConfig(**__SCREAMING_SNAKE_CASE ) config.to_json_file(__SCREAMING_SNAKE_CASE ) return path def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): lowercase = parser.add_parser('default' , parents=__SCREAMING_SNAKE_CASE , help=__SCREAMING_SNAKE_CASE , formatter_class=__SCREAMING_SNAKE_CASE ) parser.add_argument( '--config_file' , default=__SCREAMING_SNAKE_CASE , help=( 'The path to use to store the config file. Will default to a file named default_config.yaml in the cache ' 'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ' 'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ' 'with \'huggingface\'.' ) , dest='save_location' , ) parser.add_argument( '--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=__SCREAMING_SNAKE_CASE , help='Whether or not to use mixed precision training. ' 'Choose between FP16 and BF16 (bfloat16) training. ' 'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , ) parser.set_defaults(func=__SCREAMING_SNAKE_CASE ) return parser def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ): lowercase = write_basic_config(args.mixed_precision , args.save_location ) if config_file: print(F'''accelerate configuration saved at {config_file}''' )
195
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available A : Union[str, Any] = { "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"], "tokenization_biogpt": ["BioGptTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Tuple = [ "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST", "BioGptForCausalLM", "BioGptForTokenClassification", "BioGptForSequenceClassification", "BioGptModel", "BioGptPreTrainedModel", ] if TYPE_CHECKING: from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig from .tokenization_biogpt import BioGptTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_biogpt import ( BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptPreTrainedModel, ) else: import sys A : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
365
"""simple docstring""" import random def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = False ): '''simple docstring''' __lowerCAmelCase = {i: [] for i in range(_UpperCamelCase )} # if probability is greater or equal than 1, then generate a complete graph if probability >= 1: return complete_graph(_UpperCamelCase ) # if probability is lower or equal than 0, then return a graph without edges if probability <= 0: return graph # for each couple of nodes, add an edge from u to v # if the number randomly generated is greater than probability probability for i in range(_UpperCamelCase ): for j in range(i + 1 , _UpperCamelCase ): if random.random() < probability: graph[i].append(_UpperCamelCase ) if not directed: # if the graph is undirected, add an edge in from j to i, either graph[j].append(_UpperCamelCase ) return graph def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' return { i: [j for j in range(_UpperCamelCase ) if i != j] for i in range(_UpperCamelCase ) } if __name__ == "__main__": import doctest doctest.testmod()
259
0
"""simple docstring""" from math import factorial def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : float ) -> float: '''simple docstring''' if successes > trials: raise ValueError("""successes must be lower or equal to trials""" ) if trials < 0 or successes < 0: raise ValueError("""the function is defined for non-negative integers""" ) if not isinstance(__UpperCamelCase , __UpperCamelCase ) or not isinstance(__UpperCamelCase , __UpperCamelCase ): raise ValueError("""the function is defined for non-negative integers""" ) if not 0 < prob < 1: raise ValueError("""prob has to be in range of 1 - 0""" ) __UpperCAmelCase : Tuple = (prob**successes) * ((1 - prob) ** (trials - successes)) # Calculate the binomial coefficient: n! / k!(n-k)! __UpperCAmelCase : int = float(factorial(__UpperCamelCase ) ) coefficient /= factorial(__UpperCamelCase ) * factorial(trials - successes ) return probability * coefficient if __name__ == "__main__": from doctest import testmod testmod() print('Probability of 2 successes out of 4 trails') print('with probability of 0.75 is:', end=' ') print(binomial_distribution(2, 4, 0.75))
115
import socket def __SCREAMING_SNAKE_CASE ( ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = socket.socket(socket.AF_INET , socket.SOCK_STREAM ) SCREAMING_SNAKE_CASE__ = socket.gethostname() SCREAMING_SNAKE_CASE__ = 1_23_12 sock.connect((host, port) ) sock.send(B"""Hello server!""" ) with open("""Received_file""" , """wb""" ) as out_file: print("""File opened""" ) print("""Receiving data...""" ) while True: SCREAMING_SNAKE_CASE__ = sock.recv(10_24 ) if not data: break out_file.write(__UpperCamelCase ) print("""Successfully received the file""" ) sock.close() print("""Connection closed""" ) if __name__ == "__main__": main()
219
0
"""simple docstring""" def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> int: while a != 0: lowerCAmelCase__ : Optional[Any] = b % a, a return b def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> int: if gcd(__UpperCAmelCase , __UpperCAmelCase ) != 1: lowerCAmelCase__ : Optional[Any] = f"""mod inverse of {a!r} and {m!r} does not exist""" raise ValueError(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = 1, 0, a lowerCAmelCase__ : Any = 0, 1, m while va != 0: lowerCAmelCase__ : Optional[Any] = ua // va lowerCAmelCase__ : Dict = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va return ua % m
368
"""simple docstring""" import shutil import tempfile import unittest from transformers import ( SPIECE_UNDERLINE, AddedToken, BatchEncoding, NllbTokenizer, NllbTokenizerFast, is_torch_available, ) from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin _A = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right _A = 2_5_6_0_4_7 _A = 2_5_6_1_4_5 @require_sentencepiece @require_tokenizers class _lowerCamelCase ( a_ , unittest.TestCase ): _lowerCamelCase :Any = NllbTokenizer _lowerCamelCase :Dict = NllbTokenizerFast _lowerCamelCase :str = True _lowerCamelCase :Optional[Any] = True _lowerCamelCase :Union[str, Any] = {} def _lowerCAmelCase ( self : List[str] ) -> int: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ : Optional[int] = NllbTokenizer(UpperCamelCase , keep_accents=UpperCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def _lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" lowerCAmelCase__ : Dict = NllbTokenizer(UpperCamelCase , keep_accents=UpperCamelCase ) lowerCAmelCase__ : List[str] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(UpperCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) lowerCAmelCase__ : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCamelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) lowerCAmelCase__ : Optional[Any] = tokenizer.convert_tokens_to_ids(UpperCamelCase ) self.assertListEqual( UpperCamelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) lowerCAmelCase__ : List[str] = tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertListEqual( UpperCamelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def _lowerCAmelCase ( self : Any ) -> Any: """simple docstring""" lowerCAmelCase__ : str = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-nllb""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowerCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) lowerCAmelCase__ : str = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) lowerCAmelCase__ : int = tempfile.mkdtemp() lowerCAmelCase__ : Tuple = tokenizer_r.save_pretrained(UpperCamelCase ) lowerCAmelCase__ : Optional[int] = tokenizer_p.save_pretrained(UpperCamelCase ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) lowerCAmelCase__ : Dict = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f ) self.assertSequenceEqual(UpperCamelCase , UpperCamelCase ) # Checks everything loads correctly in the same way lowerCAmelCase__ : int = tokenizer_r.from_pretrained(UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = tokenizer_p.from_pretrained(UpperCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase , UpperCamelCase ) ) shutil.rmtree(UpperCamelCase ) # Save tokenizer rust, legacy_format=True lowerCAmelCase__ : List[str] = tempfile.mkdtemp() lowerCAmelCase__ : Optional[Any] = tokenizer_r.save_pretrained(UpperCamelCase , legacy_format=UpperCamelCase ) lowerCAmelCase__ : List[str] = tokenizer_p.save_pretrained(UpperCamelCase ) # Checks it save with the same files self.assertSequenceEqual(UpperCamelCase , UpperCamelCase ) # Checks everything loads correctly in the same way lowerCAmelCase__ : List[str] = tokenizer_r.from_pretrained(UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = tokenizer_p.from_pretrained(UpperCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase , UpperCamelCase ) ) shutil.rmtree(UpperCamelCase ) # Save tokenizer rust, legacy_format=False lowerCAmelCase__ : List[Any] = tempfile.mkdtemp() lowerCAmelCase__ : int = tokenizer_r.save_pretrained(UpperCamelCase , legacy_format=UpperCamelCase ) lowerCAmelCase__ : str = tokenizer_p.save_pretrained(UpperCamelCase ) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way lowerCAmelCase__ : Dict = tokenizer_r.from_pretrained(UpperCamelCase ) lowerCAmelCase__ : Optional[int] = tokenizer_p.from_pretrained(UpperCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase , UpperCamelCase ) ) shutil.rmtree(UpperCamelCase ) @require_torch def _lowerCAmelCase ( self : Tuple ) -> List[str]: """simple docstring""" if not self.test_seqaseq: return lowerCAmelCase__ : int = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): # Longer text that will definitely require truncation. lowerCAmelCase__ : Any = [ """ UN Chief Says There Is No Military Solution in Syria""", """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for""" """ Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons""" """ will only worsen the violence and misery for millions of people.""", ] lowerCAmelCase__ : Optional[int] = [ """Şeful ONU declară că nu există o soluţie militară în Siria""", """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al""" """ Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi""" """ că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""", ] try: lowerCAmelCase__ : Dict = tokenizer.prepare_seqaseq_batch( src_texts=UpperCamelCase , tgt_texts=UpperCamelCase , max_length=3 , max_target_length=10 , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" , ) except NotImplementedError: return self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 10 ) # max_target_length will default to max_length if not specified lowerCAmelCase__ : str = tokenizer.prepare_seqaseq_batch( UpperCamelCase , tgt_texts=UpperCamelCase , max_length=3 , return_tensors="""pt""" ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 3 ) lowerCAmelCase__ : int = tokenizer.prepare_seqaseq_batch( src_texts=UpperCamelCase , max_length=3 , max_target_length=10 , return_tensors="""pt""" ) self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 ) self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 ) self.assertNotIn("""decoder_input_ids""" , UpperCamelCase ) @unittest.skip("""Unfortunately way too slow to build a BPE with SentencePiece.""" ) def _lowerCAmelCase ( self : List[str] ) -> Dict: """simple docstring""" pass def _lowerCAmelCase ( self : str ) -> Union[str, Any]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowerCAmelCase__ : str = [AddedToken("""<special>""" , lstrip=UpperCamelCase )] lowerCAmelCase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained( UpperCamelCase , additional_special_tokens=UpperCamelCase , **UpperCamelCase ) lowerCAmelCase__ : Dict = tokenizer_r.encode("""Hey this is a <special> token""" ) lowerCAmelCase__ : Dict = tokenizer_r.encode("""<special>""" , add_special_tokens=UpperCamelCase )[0] self.assertTrue(special_token_id in r_output ) if self.test_slow_tokenizer: lowerCAmelCase__ : List[Any] = self.rust_tokenizer_class.from_pretrained( UpperCamelCase , additional_special_tokens=UpperCamelCase , **UpperCamelCase , ) lowerCAmelCase__ : Dict = self.tokenizer_class.from_pretrained( UpperCamelCase , additional_special_tokens=UpperCamelCase , **UpperCamelCase ) lowerCAmelCase__ : Optional[int] = tokenizer_p.encode("""Hey this is a <special> token""" ) lowerCAmelCase__ : Dict = tokenizer_cr.encode("""Hey this is a <special> token""" ) self.assertEqual(UpperCamelCase , UpperCamelCase ) self.assertEqual(UpperCamelCase , UpperCamelCase ) self.assertTrue(special_token_id in p_output ) self.assertTrue(special_token_id in cr_output ) @require_torch @require_sentencepiece @require_tokenizers class _lowerCamelCase ( unittest.TestCase ): _lowerCamelCase :int = "facebook/nllb-200-distilled-600M" _lowerCamelCase :List[str] = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] _lowerCamelCase :Optional[Any] = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] _lowerCamelCase :Tuple = [ 256047, 16297, 134408, 8165, 248066, 14734, 950, 1135, 105721, 3573, 83, 27352, 108, 49486, 2, ] @classmethod def _lowerCAmelCase ( cls : Optional[Any] ) -> Any: """simple docstring""" lowerCAmelCase__ : NllbTokenizer = NllbTokenizer.from_pretrained( cls.checkpoint_name , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" ) lowerCAmelCase__ : Optional[Any] = 1 return cls def _lowerCAmelCase ( self : Any ) -> int: """simple docstring""" self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Arab"""] , 25_60_01 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Latn"""] , 25_60_02 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""fra_Latn"""] , 25_60_57 ) def _lowerCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" lowerCAmelCase__ : Union[str, Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , UpperCamelCase ) def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" self.assertIn(UpperCamelCase , self.tokenizer.all_special_ids ) # fmt: off lowerCAmelCase__ : str = [RO_CODE, 42_54, 9_80_68, 11_29_23, 3_90_72, 39_09, 7_13, 10_27_67, 26, 1_73_14, 3_56_42, 1_46_83, 3_31_18, 20_22, 6_69_87, 2, 25_60_47] # fmt: on lowerCAmelCase__ : Any = self.tokenizer.decode(UpperCamelCase , skip_special_tokens=UpperCamelCase ) lowerCAmelCase__ : Tuple = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase ) self.assertEqual(UpperCamelCase , UpperCamelCase ) self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase ) def _lowerCAmelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" lowerCAmelCase__ : List[Any] = ["""this is gunna be a long sentence """ * 20] assert isinstance(src_text[0] , UpperCamelCase ) lowerCAmelCase__ : int = 10 lowerCAmelCase__ : Any = self.tokenizer(UpperCamelCase , max_length=UpperCamelCase , truncation=UpperCamelCase ).input_ids[0] self.assertEqual(ids[-1] , 2 ) self.assertEqual(ids[0] , UpperCamelCase ) self.assertEqual(len(UpperCamelCase ) , UpperCamelCase ) def _lowerCAmelCase ( self : Any ) -> Any: """simple docstring""" self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [25_62_03, 3] ) def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" lowerCAmelCase__ : List[Any] = tempfile.mkdtemp() lowerCAmelCase__ : int = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = NllbTokenizer.from_pretrained(UpperCamelCase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase ) @require_torch def _lowerCAmelCase ( self : int ) -> List[str]: """simple docstring""" lowerCAmelCase__ : Optional[int] = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=UpperCamelCase , truncation=UpperCamelCase , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , ) lowerCAmelCase__ : int = shift_tokens_right( batch["""labels"""] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["""ron_Latn"""] ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual((2, 15) , batch.input_ids.shape ) self.assertEqual((2, 15) , batch.attention_mask.shape ) lowerCAmelCase__ : Dict = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , UpperCamelCase ) self.assertEqual(UpperCamelCase , batch.decoder_input_ids[0, 0] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def _lowerCAmelCase ( self : Optional[int] ) -> int: """simple docstring""" lowerCAmelCase__ : str = self.tokenizer(self.src_text , padding=UpperCamelCase , truncation=UpperCamelCase , max_length=3 , return_tensors="""pt""" ) lowerCAmelCase__ : Any = self.tokenizer( text_target=self.tgt_text , padding=UpperCamelCase , truncation=UpperCamelCase , max_length=10 , return_tensors="""pt""" ) lowerCAmelCase__ : str = targets["""input_ids"""] lowerCAmelCase__ : Any = shift_tokens_right( UpperCamelCase , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def _lowerCAmelCase ( self : Dict ) -> Optional[Any]: """simple docstring""" lowerCAmelCase__ : Any = self.tokenizer._build_translation_inputs( """A test""" , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" ) self.assertEqual( nested_simplify(UpperCamelCase ) , { # A, test, EOS, en_XX """input_ids""": [[25_60_47, 70, 73_56, 2]], """attention_mask""": [[1, 1, 1, 1]], # ar_AR """forced_bos_token_id""": 25_60_57, } , ) @require_torch def _lowerCAmelCase ( self : Dict ) -> Tuple: """simple docstring""" lowerCAmelCase__ : Union[str, Any] = True lowerCAmelCase__ : str = self.tokenizer( """UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" ) self.assertEqual( inputs.input_ids , [1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2, 25_60_47] ) lowerCAmelCase__ : List[str] = False lowerCAmelCase__ : Union[str, Any] = self.tokenizer( """UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" ) self.assertEqual( inputs.input_ids , [25_60_47, 1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2] )
212
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __a = { 'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'], 'tokenization_xlm': ['XLMTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'XLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'XLMForMultipleChoice', 'XLMForQuestionAnswering', 'XLMForQuestionAnsweringSimple', 'XLMForSequenceClassification', 'XLMForTokenClassification', 'XLMModel', 'XLMPreTrainedModel', 'XLMWithLMHeadModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXLMForMultipleChoice', 'TFXLMForQuestionAnsweringSimple', 'TFXLMForSequenceClassification', 'TFXLMForTokenClassification', 'TFXLMMainLayer', 'TFXLMModel', 'TFXLMPreTrainedModel', 'TFXLMWithLMHeadModel', ] if TYPE_CHECKING: from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig from .tokenization_xlm import XLMTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class lowercase__( unittest.TestCase ): """simple docstring""" def _lowercase ( self : List[str] ) -> List[Any]: lowercase_ = 1_0 def _lowercase ( self : int ) -> List[str]: lowercase_ = [1, 2, 3, 4] lowercase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : int ) -> Optional[Any]: lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Union[str, Any] ) -> Optional[int]: lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3] lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Any ) -> List[Any]: lowercase_ = '''It was the year of Our Lord one thousand seven hundred and seventy-five.\n\nSpiritual revelations were conceded to England at that favoured period, as at this.''' lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [] ) def _lowercase ( self : List[str] ) -> List[str]: lowercase_ = '''''' lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [] ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [] ) def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]: lowercase_ = ( '''It was the year of Our Lord one thousand seven hundred and ''' '''seventy-five\n\nSpiritual revelations were conceded to England ''' '''at that favoured period, as at this.\n@highlight\n\nIt was the best of times''' ) lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ ) lowercase_ = [ '''It was the year of Our Lord one thousand seven hundred and seventy-five.''', '''Spiritual revelations were conceded to England at that favoured period, as at this.''', ] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = ['''It was the best of times.'''] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Union[str, Any] ) -> Optional[Any]: lowercase_ = torch.tensor([1, 2, 3, 4] ) lowercase_ = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 0 ).numpy() , expected.numpy() ) def _lowercase ( self : List[Any] ) -> Tuple: lowercase_ = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] ) lowercase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 2_3 ).numpy() , expected.numpy() ) def _lowercase ( self : int ) -> Dict: lowercase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) lowercase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 1 ).numpy() , expected.numpy() ) def _lowercase ( self : List[str] ) -> Tuple: lowercase_ = 1_0_1 lowercase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] ) lowercase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) lowercase_ = compute_token_type_ids(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) np.testing.assert_array_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
30
1
import unittest import numpy as np def UpperCAmelCase_( a__ , a__ , a__ , a__ = None , ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = np.shape(_UpperCAmelCase ) SCREAMING_SNAKE_CASE : int = np.shape(_UpperCAmelCase ) SCREAMING_SNAKE_CASE : Dict = np.shape(_UpperCAmelCase ) if shape_a[0] != shape_b[0]: SCREAMING_SNAKE_CASE : Optional[int] = ( 'Expected the same number of rows for A and B. ' F"""Instead found A of size {shape_a} and B of size {shape_b}""" ) raise ValueError(_UpperCAmelCase ) if shape_b[1] != shape_c[1]: SCREAMING_SNAKE_CASE : List[Any] = ( 'Expected the same number of columns for B and C. ' F"""Instead found B of size {shape_b} and C of size {shape_c}""" ) raise ValueError(_UpperCAmelCase ) SCREAMING_SNAKE_CASE : Any = pseudo_inv if a_inv is None: try: SCREAMING_SNAKE_CASE : Union[str, Any] = np.linalg.inv(_UpperCAmelCase ) except np.linalg.LinAlgError: raise ValueError( '''Input matrix A is not invertible. Cannot compute Schur complement.''' ) return mat_c - mat_b.T @ a_inv @ mat_b class a_ ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self ) ->None: SCREAMING_SNAKE_CASE : int = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) SCREAMING_SNAKE_CASE : Dict = np.array([[0, 3], [3, 0], [2, 3]] ) SCREAMING_SNAKE_CASE : str = np.array([[2, 1], [6, 3]] ) SCREAMING_SNAKE_CASE : str = schur_complement(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = np.block([[a, b], [b.T, c]] ) SCREAMING_SNAKE_CASE : Union[str, Any] = np.linalg.det(_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[str] = np.linalg.det(_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[str] = np.linalg.det(_lowerCamelCase ) self.assertAlmostEqual(_lowerCamelCase , det_a * det_s ) def __lowerCAmelCase ( self ) ->None: SCREAMING_SNAKE_CASE : List[str] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) SCREAMING_SNAKE_CASE : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] ) SCREAMING_SNAKE_CASE : Optional[Any] = np.array([[2, 1], [6, 3]] ) with self.assertRaises(_lowerCamelCase ): schur_complement(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def __lowerCAmelCase ( self ) ->None: SCREAMING_SNAKE_CASE : Dict = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) SCREAMING_SNAKE_CASE : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] ) SCREAMING_SNAKE_CASE : List[Any] = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(_lowerCamelCase ): schur_complement(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
358
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) a__ : Any = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Dict = ['''XGLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : List[Any] = ['''XGLMTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : int = [ '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XGLMForCausalLM''', '''XGLMModel''', '''XGLMPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Tuple = [ '''FlaxXGLMForCausalLM''', '''FlaxXGLMModel''', '''FlaxXGLMPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Tuple = [ '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXGLMForCausalLM''', '''TFXGLMModel''', '''TFXGLMPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys a__ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
19
0
import collections import json import math import os import re import time from fnmatch import fnmatch from typing import Dict import requests from slack_sdk import WebClient __UpperCAmelCase = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"]) def A__ ( __lowerCamelCase ): SCREAMING_SNAKE_CASE_ = test_results.split(''' ''' ) SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = 0 # When the output is short enough, the output is surrounded by = signs: "== OUTPUT ==" # When it is too long, those signs are not present. SCREAMING_SNAKE_CASE_ = expressions[-2] if '''=''' in expressions[-1] else expressions[-1] for i, expression in enumerate(__lowerCamelCase ): if "failed" in expression: failed += int(expressions[i - 1] ) if "passed" in expression: success += int(expressions[i - 1] ) return failed, success, time_spent def A__ ( __lowerCamelCase ): SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = False for line in failures_short_lines.split('''\n''' ): if re.search(r'''_ \[doctest\]''', __lowerCamelCase ): SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = line.split(''' ''' )[2] elif in_error and not line.split(''' ''' )[0].isdigit(): SCREAMING_SNAKE_CASE_ = line SCREAMING_SNAKE_CASE_ = False return failures class UpperCamelCase__ : """simple docstring""" def __init__( self , _A , _A ) -> Dict: SCREAMING_SNAKE_CASE_ = title SCREAMING_SNAKE_CASE_ = doc_test_results['''time_spent'''].split(''',''' )[0] SCREAMING_SNAKE_CASE_ = doc_test_results['''success'''] SCREAMING_SNAKE_CASE_ = doc_test_results['''failures'''] SCREAMING_SNAKE_CASE_ = self.n_success + self.n_failures # Failures and success of the modeling tests SCREAMING_SNAKE_CASE_ = doc_test_results @property def _UpperCamelCase ( self ) -> str: SCREAMING_SNAKE_CASE_ = [self._time_spent] SCREAMING_SNAKE_CASE_ = 0 for time in time_spent: SCREAMING_SNAKE_CASE_ = time.split(''':''' ) # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute. if len(_A ) == 1: SCREAMING_SNAKE_CASE_ = [0, 0, time_parts[0]] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] ) total_secs += hours * 3600 + minutes * 60 + seconds SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60 return F'''{int(_A )}h{int(_A )}m{int(_A )}s''' @property def _UpperCamelCase ( self ) -> Dict: return {"type": "header", "text": {"type": "plain_text", "text": self.title}} @property def _UpperCamelCase ( self ) -> Dict: return { "type": "section", "text": { "type": "plain_text", "text": F'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''', "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''', }, } @property def _UpperCamelCase ( self ) -> Dict: return { "type": "section", "text": { "type": "plain_text", "text": ( F'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in''' F''' {self.time}.''' ), "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''', }, } @property def _UpperCamelCase ( self ) -> Dict: SCREAMING_SNAKE_CASE_ = 40 SCREAMING_SNAKE_CASE_ = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(_A , _A )} SCREAMING_SNAKE_CASE_ = '''''' for category, failures in category_failures.items(): if len(_A ) == 0: continue if report != "": report += "\n\n" report += F'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n" report += "`" report += "`\n`".join(_A ) report += "`" return { "type": "section", "text": { "type": "mrkdwn", "text": F'''The following examples had failures:\n\n\n{report}\n''', }, } @property def _UpperCamelCase ( self ) -> str: SCREAMING_SNAKE_CASE_ = [self.header] if self.n_failures > 0: blocks.append(self.failures ) if self.n_failures > 0: blocks.extend([self.category_failures] ) if self.n_failures == 0: blocks.append(self.no_failures ) return json.dumps(_A ) @staticmethod def _UpperCamelCase ( ) -> Any: SCREAMING_SNAKE_CASE_ = [ { '''type''': '''section''', '''text''': { '''type''': '''plain_text''', '''text''': '''There was an issue running the tests.''', }, '''accessory''': { '''type''': '''button''', '''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True}, '''url''': F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''', }, } ] print('''Sending the following payload''' ) print(json.dumps({'''blocks''': json.loads(_A )} ) ) client.chat_postMessage( channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text='''There was an issue running the tests.''' , blocks=_A , ) def _UpperCamelCase ( self ) -> Optional[int]: print('''Sending the following payload''' ) print(json.dumps({'''blocks''': json.loads(self.payload )} ) ) SCREAMING_SNAKE_CASE_ = F'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else '''All tests passed.''' SCREAMING_SNAKE_CASE_ = client.chat_postMessage( channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , blocks=self.payload , text=_A , ) def _UpperCamelCase ( self , _A , _A , _A , _A ) -> Optional[int]: SCREAMING_SNAKE_CASE_ = '''''' for key, value in failures.items(): SCREAMING_SNAKE_CASE_ = value[:200] + ''' [Truncated]''' if len(_A ) > 250 else value failures_text += F'''*{key}*\n_{value}_\n\n''' SCREAMING_SNAKE_CASE_ = job_name SCREAMING_SNAKE_CASE_ = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}} if job_link is not None: SCREAMING_SNAKE_CASE_ = { '''type''': '''button''', '''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True}, '''url''': job_link, } return [ {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}}, content, {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}}, ] def _UpperCamelCase ( self ) -> int: if self.thread_ts is None: raise ValueError('''Can only post reply if a post has been made.''' ) SCREAMING_SNAKE_CASE_ = self.doc_test_results.pop('''job_link''' ) self.doc_test_results.pop('''failures''' ) self.doc_test_results.pop('''success''' ) self.doc_test_results.pop('''time_spent''' ) SCREAMING_SNAKE_CASE_ = sorted(self.doc_test_results.items() , key=lambda _A : t[0] ) for job, job_result in sorted_dict: if len(job_result['''failures'''] ): SCREAMING_SNAKE_CASE_ = F'''*Num failures* :{len(job_result["failed"] )} \n''' SCREAMING_SNAKE_CASE_ = job_result['''failures'''] SCREAMING_SNAKE_CASE_ = self.get_reply_blocks(_A , _A , _A , text=_A ) print('''Sending the following reply''' ) print(json.dumps({'''blocks''': blocks} ) ) client.chat_postMessage( channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text=F'''Results for {job}''' , blocks=_A , thread_ts=self.thread_ts['''ts'''] , ) time.sleep(1 ) def A__ ( ): SCREAMING_SNAKE_CASE_ = os.environ['''GITHUB_RUN_ID'''] SCREAMING_SNAKE_CASE_ = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100''' SCREAMING_SNAKE_CASE_ = requests.get(__lowerCamelCase ).json() SCREAMING_SNAKE_CASE_ = {} try: jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} ) SCREAMING_SNAKE_CASE_ = math.ceil((result['''total_count'''] - 1_00) / 1_00 ) for i in range(__lowerCamelCase ): SCREAMING_SNAKE_CASE_ = requests.get(url + F'''&page={i + 2}''' ).json() jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} ) return jobs except Exception as e: print('''Unknown error, could not fetch links.''', __lowerCamelCase ) return {} def A__ ( __lowerCamelCase ): SCREAMING_SNAKE_CASE_ = {} if os.path.exists(__lowerCamelCase ): SCREAMING_SNAKE_CASE_ = os.listdir(__lowerCamelCase ) for file in files: try: with open(os.path.join(__lowerCamelCase, __lowerCamelCase ), encoding='''utf-8''' ) as f: SCREAMING_SNAKE_CASE_ = f.read() except UnicodeDecodeError as e: raise ValueError(F'''Could not open {os.path.join(__lowerCamelCase, __lowerCamelCase )}.''' ) from e return _artifact def A__ ( ): class UpperCamelCase__ : """simple docstring""" def __init__( self , _A ) -> List[Any]: SCREAMING_SNAKE_CASE_ = name SCREAMING_SNAKE_CASE_ = [] def __str__( self ) -> int: return self.name def _UpperCamelCase ( self , _A ) -> Tuple: self.paths.append({'''name''': self.name, '''path''': path} ) SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = filter(os.path.isdir, os.listdir() ) for directory in directories: SCREAMING_SNAKE_CASE_ = directory if artifact_name not in _available_artifacts: SCREAMING_SNAKE_CASE_ = Artifact(__lowerCamelCase ) _available_artifacts[artifact_name].add_path(__lowerCamelCase ) return _available_artifacts if __name__ == "__main__": __UpperCAmelCase = get_job_links() __UpperCAmelCase = retrieve_available_artifacts() __UpperCAmelCase = collections.OrderedDict( [ ("*.py", "API Examples"), ("*.md", "MD Examples"), ] ) # This dict will contain all the information relative to each doc test category: # - failed: list of failed tests # - failures: dict in the format 'test': 'error_message' __UpperCAmelCase = { v: { "failed": [], "failures": {}, } for v in docs.values() } # Link to the GitHub Action job __UpperCAmelCase = github_actions_job_links.get("run_doctests") __UpperCAmelCase = available_artifacts["doc_tests_gpu_test_reports"].paths[0] __UpperCAmelCase = retrieve_artifact(artifact_path["name"]) if "stats" in artifact: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = handle_test_results(artifact["stats"]) __UpperCAmelCase = failed __UpperCAmelCase = success __UpperCAmelCase = time_spent[1:-1] + ", " __UpperCAmelCase = extract_first_line_failure(artifact["failures_short"]) for line in artifact["summary_short"].split("\n"): if re.search("FAILED", line): __UpperCAmelCase = line.replace("FAILED ", "") __UpperCAmelCase = line.split()[0].replace("\n", "") if "::" in line: __UpperCAmelCase , __UpperCAmelCase = line.split("::") else: __UpperCAmelCase , __UpperCAmelCase = line, line for file_regex in docs.keys(): if fnmatch(file_path, file_regex): __UpperCAmelCase = docs[file_regex] doc_test_results[category]["failed"].append(test) __UpperCAmelCase = all_failures[test] if test in all_failures else "N/A" __UpperCAmelCase = failure break __UpperCAmelCase = Message("🤗 Results of the doc tests.", doc_test_results) message.post() message.post_reply()
299
import math import random def A__ ( __lowerCamelCase, __lowerCamelCase = False ): if deriv: return value * (1 - value) return 1 / (1 + math.exp(-value )) # Initial Value __UpperCAmelCase = 0.02 def A__ ( __lowerCamelCase, __lowerCamelCase ): SCREAMING_SNAKE_CASE_ = float(2 * (random.randint(1, 1_00 )) - 1 ) for _ in range(__lowerCamelCase ): # Forward propagation SCREAMING_SNAKE_CASE_ = sigmoid_function(INITIAL_VALUE * weight ) # How much did we miss? SCREAMING_SNAKE_CASE_ = (expected / 1_00) - layer_a # Error delta SCREAMING_SNAKE_CASE_ = layer_1_error * sigmoid_function(__lowerCamelCase, __lowerCamelCase ) # Update weight weight += INITIAL_VALUE * layer_1_delta return layer_a * 1_00 if __name__ == "__main__": import doctest doctest.testmod() __UpperCAmelCase = int(input("Expected value: ")) __UpperCAmelCase = int(input("Number of propagations: ")) print(forward_propagation(expected, number_propagations))
299
1
def snake_case( __magic_name__ , __magic_name__ ) -> bool: '''simple docstring''' lowercase : List[Any] = len(__magic_name__ ) lowercase : str = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )] # for each arr value, a sum of zero(0) can be formed by not taking any element # hence True/1 for i in range(arr_len + 1 ): lowercase : List[str] = True # sum is not zero and set is empty then false for i in range(1 , required_sum + 1 ): lowercase : str = False for i in range(1 , arr_len + 1 ): for j in range(1 , required_sum + 1 ): if arr[i - 1] > j: lowercase : Optional[Any] = subset[i - 1][j] if arr[i - 1] <= j: lowercase : Dict = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]] return subset[arr_len][required_sum] if __name__ == "__main__": import doctest doctest.testmod()
361
import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset lowerCAmelCase_ = 'bert-base-cased' lowerCAmelCase_ = 'google/pegasus-xsum' lowerCAmelCase_ = [' Sam ate lunch today.', 'Sams lunch ingredients.'] lowerCAmelCase_ = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee'] lowerCAmelCase_ = 'patrickvonplaten/t5-tiny-random' lowerCAmelCase_ = 'sshleifer/bart-tiny-random' lowerCAmelCase_ = 'sshleifer/tiny-mbart' lowerCAmelCase_ = 'sshleifer/tiny-marian-en-de' def snake_case( __magic_name__ , __magic_name__ ) -> Optional[Any]: '''simple docstring''' lowercase : List[str] = '''\n'''.join(__magic_name__ ) Path(__magic_name__ ).open('''w''' ).writelines(__magic_name__ ) def snake_case( __magic_name__ ) -> Optional[int]: '''simple docstring''' for split in ["train", "val", "test"]: _dump_articles(os.path.join(__magic_name__ , F"""{split}.source""" ) , __magic_name__ ) _dump_articles(os.path.join(__magic_name__ , F"""{split}.target""" ) , __magic_name__ ) return tmp_dir class _A ( _lowerCamelCase ): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def __a ( self : List[str] , _A : Optional[Any] ) -> Dict: """simple docstring""" lowercase : int = AutoTokenizer.from_pretrained(_A ) lowercase : Optional[int] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowercase : List[str] = max(len(tokenizer.encode(_A ) ) for a in ARTICLES ) lowercase : Optional[int] = max(len(tokenizer.encode(_A ) ) for a in SUMMARIES ) lowercase : str = 4 lowercase : List[Any] = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated lowercase , lowercase : Optional[int] = '''ro_RO''', '''de_DE''' # ignored for all but mbart, but never causes error. lowercase : int = SeqaSeqDataset( _A , data_dir=_A , type_path='''train''' , max_source_length=_A , max_target_length=_A , src_lang=_A , tgt_lang=_A , ) lowercase : Optional[int] = DataLoader(_A , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(_A , _A ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place lowercase : int = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def __a ( self : int , _A : Tuple ) -> List[str]: """simple docstring""" lowercase : int = AutoTokenizer.from_pretrained(_A ) lowercase : int = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowercase : Optional[int] = max(len(tokenizer.encode(_A ) ) for a in ARTICLES ) lowercase : List[Any] = max(len(tokenizer.encode(_A ) ) for a in SUMMARIES ) lowercase : List[Any] = 4 lowercase : Any = LegacySeqaSeqDataset( _A , data_dir=_A , type_path='''train''' , max_source_length=20 , max_target_length=_A , ) lowercase : Optional[Any] = DataLoader(_A , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def __a ( self : List[str] ) -> int: """simple docstring""" lowercase : Tuple = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''' ) lowercase : List[str] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) lowercase : Optional[int] = tmp_dir.joinpath('''train.source''' ).open().readlines() lowercase : List[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(_A , _A , 128 , _A ) lowercase : Dict = {x.name for x in tmp_dir.iterdir()} lowercase : Optional[Any] = {x.name for x in save_dir.iterdir()} lowercase : int = save_dir.joinpath('''train.source''' ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(_A ) < len(_A ) assert len(_A ) == 1 assert len(packed_examples[0] ) == sum(len(_A ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''' ) def __a ( self : Any ) -> Dict: """simple docstring""" if not FAIRSEQ_AVAILABLE: return lowercase , lowercase , lowercase : Any = self._get_dataset(max_len=64 ) lowercase : List[Any] = 64 lowercase : Any = ds.make_dynamic_sampler(_A , required_batch_size_multiple=_A ) lowercase : Tuple = [len(_A ) for x in batch_sampler] assert len(set(_A ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(_A ) == len(_A ) # no dropped or added examples lowercase : str = DataLoader(_A , batch_sampler=_A , collate_fn=ds.collate_fn , num_workers=2 ) lowercase : Optional[int] = [] lowercase : str = [] for batch in data_loader: lowercase : Tuple = batch['''input_ids'''].shape lowercase : List[Any] = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple lowercase : Dict = np.product(batch['''input_ids'''].shape ) num_src_per_batch.append(_A ) if num_src_tokens > (max_tokens * 1.1): failures.append(_A ) assert num_src_per_batch[0] == max(_A ) if failures: raise AssertionError(f"""too many tokens in {len(_A )} batches""" ) def __a ( self : int ) -> Any: """simple docstring""" lowercase , lowercase , lowercase : Tuple = self._get_dataset(max_len=512 ) lowercase : Tuple = 2 lowercase : Union[str, Any] = ds.make_sortish_sampler(_A , shuffle=_A ) lowercase : List[Any] = DataLoader(_A , batch_size=_A , collate_fn=ds.collate_fn , num_workers=2 ) lowercase : List[Any] = DataLoader(_A , batch_size=_A , collate_fn=ds.collate_fn , num_workers=2 , sampler=_A ) lowercase : int = tokenizer.pad_token_id def count_pad_tokens(_A : List[Any] , _A : Union[str, Any]="input_ids" ): return [batch[k].eq(_A ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(_A , k='''labels''' ) ) < sum(count_pad_tokens(_A , k='''labels''' ) ) assert sum(count_pad_tokens(_A ) ) < sum(count_pad_tokens(_A ) ) assert len(_A ) == len(_A ) def __a ( self : Any , _A : Union[str, Any]=1_000 , _A : str=128 ) -> List[Any]: """simple docstring""" if os.getenv('''USE_REAL_DATA''' , _A ): lowercase : Optional[Any] = '''examples/seq2seq/wmt_en_ro''' lowercase : Optional[int] = max_len * 2 * 64 if not Path(_A ).joinpath('''train.len''' ).exists(): save_len_file(_A , _A ) else: lowercase : Tuple = '''examples/seq2seq/test_data/wmt_en_ro''' lowercase : Optional[Any] = max_len * 4 save_len_file(_A , _A ) lowercase : Optional[Any] = AutoTokenizer.from_pretrained(_A ) lowercase : Union[str, Any] = SeqaSeqDataset( _A , data_dir=_A , type_path='''train''' , max_source_length=_A , max_target_length=_A , n_obs=_A , ) return ds, max_tokens, tokenizer def __a ( self : List[str] ) -> List[str]: """simple docstring""" lowercase , lowercase , lowercase : Union[str, Any] = self._get_dataset() lowercase : int = set(DistributedSortishSampler(_A , 256 , num_replicas=2 , rank=0 , add_extra_examples=_A ) ) lowercase : Dict = set(DistributedSortishSampler(_A , 256 , num_replicas=2 , rank=1 , add_extra_examples=_A ) ) assert idsa.intersection(_A ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def __a ( self : Union[str, Any] , _A : Tuple ) -> Optional[int]: """simple docstring""" lowercase : Union[str, Any] = AutoTokenizer.from_pretrained(_A , use_fast=_A ) if tok_name == MBART_TINY: lowercase : Tuple = SeqaSeqDataset( _A , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , ) lowercase : Union[str, Any] = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: lowercase : List[Any] = SeqaSeqDataset( _A , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , ) lowercase : Dict = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(_A ) == 1 if tok_name == BART_TINY else len(_A ) == 0
116
0
'''simple docstring''' def lowerCamelCase__ ( _A = 1000 ): a , a : Optional[int] = 1, 1 a : Optional[int] = 2 while True: a : Tuple = 0 a : Any = fa + fa a , a : Union[str, Any] = fa, f index += 1 for _ in str(_A ): i += 1 if i == n: break return index if __name__ == "__main__": print(solution(int(str(input()).strip())))
297
'''simple docstring''' def lowerCamelCase__ ( _A ): return 10 - x * x def lowerCamelCase__ ( _A , _A ): # Bolzano theory in order to find if there is a root between a and b if equation(_A ) * equation(_A ) >= 0: raise ValueError('Wrong space!' ) a : Tuple = a while (b - a) >= 0.01: # Find middle point a : Tuple = (a + b) / 2 # Check if middle point is root if equation(_A ) == 0.0: break # Decide the side to repeat the steps if equation(_A ) * equation(_A ) < 0: a : List[str] = c else: a : Tuple = c return c if __name__ == "__main__": import doctest doctest.testmod() print(bisection(-2, 5)) print(bisection(0, 6))
297
1
import warnings from ...utils import logging from .image_processing_donut import DonutImageProcessor _UpperCAmelCase = logging.get_logger(__name__) class UpperCAmelCase ( __A ): '''simple docstring''' def __init__( self , *lowercase , **lowercase ): """simple docstring""" warnings.warn( 'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use DonutImageProcessor instead.' , lowercase , ) super().__init__(*lowercase , **lowercase )
352
import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPTaConfig from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, USER, is_staging_test sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 _UpperCAmelCase = { """return_dict""": False, """output_hidden_states""": True, """output_attentions""": True, """torchscript""": True, """torch_dtype""": """float16""", """use_bfloat16""": True, """tf_legacy_loss""": True, """pruned_heads""": {"""a""": 1}, """tie_word_embeddings""": False, """is_decoder""": True, """cross_attention_hidden_size""": 128, """add_cross_attention""": True, """tie_encoder_decoder""": True, """max_length""": 50, """min_length""": 3, """do_sample""": True, """early_stopping""": True, """num_beams""": 3, """num_beam_groups""": 3, """diversity_penalty""": 0.5, """temperature""": 2.0, """top_k""": 10, """top_p""": 0.7, """typical_p""": 0.2, """repetition_penalty""": 0.8, """length_penalty""": 0.8, """no_repeat_ngram_size""": 5, """encoder_no_repeat_ngram_size""": 5, """bad_words_ids""": [1, 2, 3], """num_return_sequences""": 3, """chunk_size_feed_forward""": 5, """output_scores""": True, """return_dict_in_generate""": True, """forced_bos_token_id""": 2, """forced_eos_token_id""": 3, """remove_invalid_values""": True, """architectures""": ["""BertModel"""], """finetuning_task""": """translation""", """id2label""": {0: """label"""}, """label2id""": {"""label""": """0"""}, """tokenizer_class""": """BertTokenizerFast""", """prefix""": """prefix""", """bos_token_id""": 6, """pad_token_id""": 7, """eos_token_id""": 8, """sep_token_id""": 9, """decoder_start_token_id""": 10, """exponential_decay_length_penalty""": (5, 1.01), """suppress_tokens""": [0, 1], """begin_suppress_tokens""": 2, """task_specific_params""": {"""translation""": """some_params"""}, """problem_type""": """regression""", } @is_staging_test class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @classmethod def lowerCAmelCase_ ( cls ): """simple docstring""" A_ : Optional[int] = TOKEN HfFolder.save_token(lowercase ) @classmethod def lowerCAmelCase_ ( cls ): """simple docstring""" try: delete_repo(token=cls._token , repo_id='test-config' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='valid_org/test-config-org' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='test-dynamic-config' ) except HTTPError: pass def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Union[str, Any] = BertConfig( vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 ) config.push_to_hub('test-config' , use_auth_token=self._token ) A_ : Dict = BertConfig.from_pretrained(F'''{USER}/test-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase , getattr(lowercase , lowercase ) ) # Reset repo delete_repo(token=self._token , repo_id='test-config' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase , repo_id='test-config' , push_to_hub=lowercase , use_auth_token=self._token ) A_ : Dict = BertConfig.from_pretrained(F'''{USER}/test-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase , getattr(lowercase , lowercase ) ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[str] = BertConfig( vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 ) config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token ) A_ : List[Any] = BertConfig.from_pretrained('valid_org/test-config-org' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase , getattr(lowercase , lowercase ) ) # Reset repo delete_repo(token=self._token , repo_id='valid_org/test-config-org' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase , repo_id='valid_org/test-config-org' , push_to_hub=lowercase , use_auth_token=self._token ) A_ : Optional[Any] = BertConfig.from_pretrained('valid_org/test-config-org' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase , getattr(lowercase , lowercase ) ) def lowerCAmelCase_ ( self ): """simple docstring""" CustomConfig.register_for_auto_class() A_ : Optional[int] = CustomConfig(attribute=4_2 ) config.push_to_hub('test-dynamic-config' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} ) A_ : Optional[int] = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=lowercase ) # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' ) self.assertEqual(new_config.attribute , 4_2 ) class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self ): """simple docstring""" A_ : int = GPTaConfig() # attempt to modify each of int/float/bool/str config records and verify they were updated A_ : Optional[int] = c.n_embd + 1 # int A_ : List[str] = c.resid_pdrop + 1.0 # float A_ : str = not c.scale_attn_weights # bool A_ : Optional[int] = c.summary_type + 'foo' # str c.update_from_string( F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' ) self.assertEqual(lowercase , c.n_embd , 'mismatch for key: n_embd' ) self.assertEqual(lowercase , c.resid_pdrop , 'mismatch for key: resid_pdrop' ) self.assertEqual(lowercase , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' ) self.assertEqual(lowercase , c.summary_type , 'mismatch for key: summary_type' ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[int] = PretrainedConfig() A_ : int = [key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to addin config_common_kwargs above. self.assertListEqual( lowercase , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] ) A_ : List[str] = [key for key, value in config_common_kwargs.items() if value == getattr(lowercase , lowercase )] if len(lowercase ) > 0: raise ValueError( 'The following keys are set with the default values in' ' `test_configuration_common.config_common_kwargs` pick another value for them:' F''' {', '.join(lowercase )}.''' ) def lowerCAmelCase_ ( self ): """simple docstring""" with self.assertRaises(lowercase ): # config is in subfolder, the following should not work without specifying the subfolder A_ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' ) A_ : Optional[int] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' ) self.assertIsNotNone(lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : int = mock.Mock() A_ : int = 5_0_0 A_ : Union[str, Any] = {} A_ : List[str] = HTTPError A_ : List[Any] = {} # Download this model to make sure it's in the cache. A_ : Tuple = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('requests.Session.request' , return_value=lowercase ) as mock_head: A_ : Optional[int] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' ) # This check we did call the fake head request mock_head.assert_called() def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Any = BertConfig.from_pretrained( 'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : int = AutoConfig.from_pretrained('bert-base-cased' ) A_ : Tuple = ['config.4.0.0.json'] with tempfile.TemporaryDirectory() as tmp_dir: configuration.save_pretrained(lowercase ) A_ : Dict = 2 json.dump(configuration.to_dict() , open(os.path.join(lowercase , 'config.4.0.0.json' ) , 'w' ) ) # This should pick the new configuration file as the version of Transformers is > 4.0.0 A_ : Tuple = AutoConfig.from_pretrained(lowercase ) self.assertEqual(new_configuration.hidden_size , 2 ) # Will need to be adjusted if we reach v42 and this test is still here. # Should pick the old configuration file as the version of Transformers is < 4.42.0 A_ : int = ['config.42.0.0.json'] A_ : str = 7_6_8 configuration.save_pretrained(lowercase ) shutil.move(os.path.join(lowercase , 'config.4.0.0.json' ) , os.path.join(lowercase , 'config.42.0.0.json' ) ) A_ : str = AutoConfig.from_pretrained(lowercase ) self.assertEqual(new_configuration.hidden_size , 7_6_8 ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[Any] = 'hf-internal-testing/test-two-configs' import transformers as new_transformers A_ : List[Any] = 'v4.0.0' A_ , A_ : List[str] = new_transformers.models.auto.AutoConfig.from_pretrained( lowercase , return_unused_kwargs=lowercase ) self.assertEqual(new_configuration.hidden_size , 2 ) # This checks `_configuration_file` ia not kept in the kwargs by mistake. self.assertDictEqual(lowercase , {} ) # Testing an older version by monkey-patching the version in the module it's used. import transformers as old_transformers A_ : Optional[int] = 'v3.0.0' A_ : List[Any] = old_transformers.models.auto.AutoConfig.from_pretrained(lowercase ) self.assertEqual(old_configuration.hidden_size , 7_6_8 )
192
0
"""simple docstring""" from bisect import bisect from itertools import accumulate def lowercase ( A_ , A_ , A_ , A_ )-> Union[str, Any]: '''simple docstring''' a : Any = sorted(zip(A_ , A_ ) , key=lambda A_ : x[0] / x[1] , reverse=A_ ) a , a : int = [i[0] for i in r], [i[1] for i in r] a : Union[str, Any] = list(accumulate(A_ ) ) a : Optional[Any] = bisect(A_ , A_ ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
40
import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def a_ ( _A , _A=0.999 , _A="cosine" , ) -> Optional[int]: """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(_A ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_A ): return math.exp(t * -12.0 ) else: raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) snake_case__ = [] for i in range(_A ): snake_case__ = i / num_diffusion_timesteps snake_case__ = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_A ) / alpha_bar_fn(_A ) , _A ) ) return torch.tensor(_A , dtype=torch.floataa ) class __SCREAMING_SNAKE_CASE( a_ , a_ ): _UpperCAmelCase = [e.name for e in KarrasDiffusionSchedulers] _UpperCAmelCase = 2 @register_to_config def __init__( self: Dict , UpperCamelCase: int = 10_00 , UpperCamelCase: float = 0.00_085 , UpperCamelCase: float = 0.012 , UpperCamelCase: str = "linear" , UpperCamelCase: Optional[Union[np.ndarray, List[float]]] = None , UpperCamelCase: str = "epsilon" , UpperCamelCase: Optional[bool] = False , UpperCamelCase: Optional[bool] = False , UpperCamelCase: float = 1.0 , UpperCamelCase: str = "linspace" , UpperCamelCase: int = 0 , ) -> str: if trained_betas is not None: snake_case__ = torch.tensor(UpperCamelCase , dtype=torch.floataa ) elif beta_schedule == "linear": snake_case__ = torch.linspace(UpperCamelCase , UpperCamelCase , UpperCamelCase , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. snake_case__ = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , UpperCamelCase , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule snake_case__ = betas_for_alpha_bar(UpperCamelCase , alpha_transform_type='cosine' ) elif beta_schedule == "exp": snake_case__ = betas_for_alpha_bar(UpperCamelCase , alpha_transform_type='exp' ) else: raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' ) snake_case__ = 1.0 - self.betas snake_case__ = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(UpperCamelCase , UpperCamelCase , UpperCamelCase ) snake_case__ = use_karras_sigmas def lowerCAmelCase_ ( self: str , UpperCamelCase: int , UpperCamelCase: Optional[int]=None ) -> str: if schedule_timesteps is None: snake_case__ = self.timesteps snake_case__ = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. for image-to-image) if len(self._index_counter ) == 0: snake_case__ = 1 if len(UpperCamelCase ) > 1 else 0 else: snake_case__ = timestep.cpu().item() if torch.is_tensor(UpperCamelCase ) else timestep snake_case__ = self._index_counter[timestep_int] return indices[pos].item() @property def lowerCAmelCase_ ( self: Optional[Any] ) -> List[Any]: # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: torch.FloatTensor , UpperCamelCase: Union[float, torch.FloatTensor] , ) -> torch.FloatTensor: snake_case__ = self.index_for_timestep(UpperCamelCase ) snake_case__ = self.sigmas[step_index] snake_case__ = sample / ((sigma**2 + 1) ** 0.5) return sample def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: int , UpperCamelCase: Union[str, torch.device] = None , UpperCamelCase: Optional[int] = None , ) -> str: snake_case__ = num_inference_steps snake_case__ = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": snake_case__ = np.linspace(0 , num_train_timesteps - 1 , UpperCamelCase , dtype=UpperCamelCase )[::-1].copy() elif self.config.timestep_spacing == "leading": snake_case__ = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 snake_case__ = (np.arange(0 , UpperCamelCase ) * step_ratio).round()[::-1].copy().astype(UpperCamelCase ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": snake_case__ = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 snake_case__ = (np.arange(UpperCamelCase , 0 , -step_ratio )).round().copy().astype(UpperCamelCase ) timesteps -= 1 else: raise ValueError( F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' ) snake_case__ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) snake_case__ = np.log(UpperCamelCase ) snake_case__ = np.interp(UpperCamelCase , np.arange(0 , len(UpperCamelCase ) ) , UpperCamelCase ) if self.config.use_karras_sigmas: snake_case__ = self._convert_to_karras(in_sigmas=UpperCamelCase , num_inference_steps=self.num_inference_steps ) snake_case__ = np.array([self._sigma_to_t(UpperCamelCase , UpperCamelCase ) for sigma in sigmas] ) snake_case__ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) snake_case__ = torch.from_numpy(UpperCamelCase ).to(device=UpperCamelCase ) snake_case__ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] ) snake_case__ = torch.from_numpy(UpperCamelCase ) snake_case__ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] ) if str(UpperCamelCase ).startswith('mps' ): # mps does not support float64 snake_case__ = timesteps.to(UpperCamelCase , dtype=torch.floataa ) else: snake_case__ = timesteps.to(device=UpperCamelCase ) # empty dt and derivative snake_case__ = None snake_case__ = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter snake_case__ = defaultdict(UpperCamelCase ) def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: List[str] , UpperCamelCase: Dict ) -> Tuple: # get log sigma snake_case__ = np.log(UpperCamelCase ) # get distribution snake_case__ = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range snake_case__ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 ) snake_case__ = low_idx + 1 snake_case__ = log_sigmas[low_idx] snake_case__ = log_sigmas[high_idx] # interpolate sigmas snake_case__ = (low - log_sigma) / (low - high) snake_case__ = np.clip(UpperCamelCase , 0 , 1 ) # transform interpolation to time range snake_case__ = (1 - w) * low_idx + w * high_idx snake_case__ = t.reshape(sigma.shape ) return t def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: torch.FloatTensor , UpperCamelCase: Dict ) -> torch.FloatTensor: snake_case__ = in_sigmas[-1].item() snake_case__ = in_sigmas[0].item() snake_case__ = 7.0 # 7.0 is the value used in the paper snake_case__ = np.linspace(0 , 1 , UpperCamelCase ) snake_case__ = sigma_min ** (1 / rho) snake_case__ = sigma_max ** (1 / rho) snake_case__ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def lowerCAmelCase_ ( self: Dict ) -> Optional[Any]: return self.dt is None def lowerCAmelCase_ ( self: int , UpperCamelCase: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase: Union[float, torch.FloatTensor] , UpperCamelCase: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase: bool = True , ) -> Union[SchedulerOutput, Tuple]: snake_case__ = self.index_for_timestep(UpperCamelCase ) # advance index counter by 1 snake_case__ = timestep.cpu().item() if torch.is_tensor(UpperCamelCase ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: snake_case__ = self.sigmas[step_index] snake_case__ = self.sigmas[step_index + 1] else: # 2nd order / Heun's method snake_case__ = self.sigmas[step_index - 1] snake_case__ = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API snake_case__ = 0 snake_case__ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": snake_case__ = sigma_hat if self.state_in_first_order else sigma_next snake_case__ = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": snake_case__ = sigma_hat if self.state_in_first_order else sigma_next snake_case__ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": snake_case__ = model_output else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' ) if self.config.clip_sample: snake_case__ = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order snake_case__ = (sample - pred_original_sample) / sigma_hat # 3. delta timestep snake_case__ = sigma_next - sigma_hat # store for 2nd order step snake_case__ = derivative snake_case__ = dt snake_case__ = sample else: # 2. 2nd order / Heun's method snake_case__ = (sample - pred_original_sample) / sigma_next snake_case__ = (self.prev_derivative + derivative) / 2 # 3. take prev timestep & sample snake_case__ = self.dt snake_case__ = self.sample # free dt and derivative # Note, this puts the scheduler in "first order mode" snake_case__ = None snake_case__ = None snake_case__ = None snake_case__ = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=UpperCamelCase ) def lowerCAmelCase_ ( self: Any , UpperCamelCase: torch.FloatTensor , UpperCamelCase: torch.FloatTensor , UpperCamelCase: torch.FloatTensor , ) -> torch.FloatTensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples snake_case__ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(UpperCamelCase ): # mps does not support float64 snake_case__ = self.timesteps.to(original_samples.device , dtype=torch.floataa ) snake_case__ = timesteps.to(original_samples.device , dtype=torch.floataa ) else: snake_case__ = self.timesteps.to(original_samples.device ) snake_case__ = timesteps.to(original_samples.device ) snake_case__ = [self.index_for_timestep(UpperCamelCase , UpperCamelCase ) for t in timesteps] snake_case__ = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): snake_case__ = sigma.unsqueeze(-1 ) snake_case__ = original_samples + noise * sigma return noisy_samples def __len__( self: List[Any] ) -> Union[str, Any]: return self.config.num_train_timesteps
307
0
'''simple docstring''' import re from filelock import FileLock try: import nltk __lowercase : Optional[Any] = True except (ImportError, ModuleNotFoundError): __lowercase : Dict = False if NLTK_AVAILABLE: with FileLock('.lock') as lock: nltk.download('punkt', quiet=True) def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): re.sub('<n>' , '' , _SCREAMING_SNAKE_CASE ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(_SCREAMING_SNAKE_CASE ) )
294
'''simple docstring''' import shutil import tempfile import unittest from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast from transformers.testing_utils import require_sentencepiece, require_torchaudio from .test_feature_extraction_clap import floats_list @require_torchaudio @require_sentencepiece class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = 'laion/clap-htsat-unfused' __a : Optional[Any] = tempfile.mkdtemp() def __UpperCAmelCase ( self , **__a ): '''simple docstring''' return RobertaTokenizer.from_pretrained(self.checkpoint , **__a ) def __UpperCAmelCase ( self , **__a ): '''simple docstring''' return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__a ) def __UpperCAmelCase ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = self.get_tokenizer() __a : List[str] = self.get_feature_extractor() __a : Any = ClapProcessor(tokenizer=__a , feature_extractor=__a ) processor.save_pretrained(self.tmpdirname ) __a : Tuple = ClapProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , __a ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() ) processor.save_pretrained(self.tmpdirname ) __a : int = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) __a : List[str] = self.get_feature_extractor(do_normalize=__a , padding_value=1.0 ) __a : Tuple = ClapProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __a ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = self.get_feature_extractor() __a : int = self.get_tokenizer() __a : str = ClapProcessor(tokenizer=__a , feature_extractor=__a ) __a : int = floats_list((3, 1000) ) __a : str = feature_extractor(__a , return_tensors='np' ) __a : int = processor(audios=__a , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = self.get_feature_extractor() __a : Any = self.get_tokenizer() __a : Any = ClapProcessor(tokenizer=__a , feature_extractor=__a ) __a : Union[str, Any] = 'This is a test string' __a : Union[str, Any] = processor(text=__a ) __a : Tuple = tokenizer(__a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = self.get_feature_extractor() __a : str = self.get_tokenizer() __a : List[str] = ClapProcessor(tokenizer=__a , feature_extractor=__a ) __a : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __a : Optional[int] = processor.batch_decode(__a ) __a : Optional[Any] = tokenizer.batch_decode(__a ) self.assertListEqual(__a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.get_feature_extractor() __a : Optional[int] = self.get_tokenizer() __a : int = ClapProcessor(tokenizer=__a , feature_extractor=__a ) self.assertListEqual( processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
294
1
import argparse from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt if __name__ == "__main__": lowerCamelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." ) parser.add_argument( "--original_config_file", type=str, required=True, help="The YAML config file corresponding to the original architecture.", ) parser.add_argument( "--num_in_channels", default=None, type=int, help="The number of input channels. If `None` number of input channels will be automatically inferred.", ) parser.add_argument( "--image_size", default=512, type=int, help=( "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2" " Base. Use 768 for Stable Diffusion v2." ), ) parser.add_argument( "--extract_ema", action="store_true", help=( "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights" " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield" " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning." ), ) parser.add_argument( "--upcast_attention", action="store_true", help=( "Whether the attention computation should always be upcasted. This is necessary when running stable" " diffusion 2.1." ), ) parser.add_argument( "--from_safetensors", action="store_true", help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.", ) parser.add_argument( "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)") def _SCREAMING_SNAKE_CASE ( lowercase : Any ): '''simple docstring''' if string == "True": return True elif string == "False": return False else: raise ValueError(f"""could not parse string as bool {string}""" ) parser.add_argument( "--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool ) parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int) lowerCamelCase : List[Any] = parser.parse_args() lowerCamelCase : Any = download_controlnet_from_original_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, extract_ema=args.extract_ema, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, use_linear_projection=args.use_linear_projection, cross_attention_dim=args.cross_attention_dim, ) controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
204
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCamelCase : str = { "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : List[str] = [ "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST", "NezhaForNextSentencePrediction", "NezhaForMaskedLM", "NezhaForPreTraining", "NezhaForMultipleChoice", "NezhaForQuestionAnswering", "NezhaForSequenceClassification", "NezhaForTokenClassification", "NezhaModel", "NezhaPreTrainedModel", ] if TYPE_CHECKING: from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import ( NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel, ) else: import sys lowerCamelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
204
1
from collections.abc import Sequence def snake_case ( snake_case__ :Sequence[float] , snake_case__ :bool = False) -> float: if not arr: return 0 _A = 0 if allow_empty_subarrays else float("""-inf""") _A = 0.0 for num in arr: _A = max(0 if allow_empty_subarrays else num , curr_sum + num) _A = max(snake_case__ , snake_case__) return max_sum if __name__ == "__main__": from doctest import testmod testmod() _SCREAMING_SNAKE_CASE = [-2, 1, -3, 4, -1, 2, 1, -5, 4] print(F'''{max_subarray_sum(nums) = }''')
81
from argparse import ArgumentParser, Namespace from typing import Any, List, Optional from ..pipelines import Pipeline, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand try: from fastapi import Body, FastAPI, HTTPException from fastapi.routing import APIRoute from pydantic import BaseModel from starlette.responses import JSONResponse from uvicorn import run _SCREAMING_SNAKE_CASE = True except (ImportError, AttributeError): _SCREAMING_SNAKE_CASE = object def snake_case ( *snake_case__ :Optional[int] , **snake_case__ :Any) -> int: pass _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = logging.get_logger('transformers-cli/serving') def snake_case ( snake_case__ :Namespace) -> Dict: _A = pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) return ServeCommand(snake_case__ , args.host , args.port , args.workers) class a ( __lowerCAmelCase ): """simple docstring""" lowerCamelCase :dict class a ( __lowerCAmelCase ): """simple docstring""" lowerCamelCase :List[str] lowerCamelCase :Optional[List[int]] class a ( __lowerCAmelCase ): """simple docstring""" lowerCamelCase :str class a ( __lowerCAmelCase ): """simple docstring""" lowerCamelCase :Any class a ( __lowerCAmelCase ): """simple docstring""" @staticmethod def UpperCAmelCase ( lowerCAmelCase_ ) -> Any: _A = parser.add_parser( """serve""" , help="""CLI tool to run inference requests through REST and GraphQL endpoints.""" ) serve_parser.add_argument( """--task""" , type=lowerCAmelCase_ , choices=get_supported_tasks() , help="""The task to run the pipeline on""" , ) serve_parser.add_argument("""--host""" , type=lowerCAmelCase_ , default="""localhost""" , help="""Interface the server will listen on.""" ) serve_parser.add_argument("""--port""" , type=lowerCAmelCase_ , default=88_88 , help="""Port the serving will listen to.""" ) serve_parser.add_argument("""--workers""" , type=lowerCAmelCase_ , default=1 , help="""Number of http workers""" ) serve_parser.add_argument("""--model""" , type=lowerCAmelCase_ , help="""Model's name or path to stored model.""" ) serve_parser.add_argument("""--config""" , type=lowerCAmelCase_ , help="""Model's config name or path to stored model.""" ) serve_parser.add_argument("""--tokenizer""" , type=lowerCAmelCase_ , help="""Tokenizer name to use.""" ) serve_parser.add_argument( """--device""" , type=lowerCAmelCase_ , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , ) serve_parser.set_defaults(func=lowerCAmelCase_ ) def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]: _A = pipeline _A = host _A = port _A = workers if not _serve_dependencies_installed: raise RuntimeError( """Using serve command requires FastAPI and uvicorn. """ """Please install transformers with [serving]: pip install \"transformers[serving]\".""" """Or install FastAPI and uvicorn separately.""" ) else: logger.info(F'''Serving model over {host}:{port}''' ) _A = FastAPI( routes=[ APIRoute( """/""" , self.model_info , response_model=lowerCAmelCase_ , response_class=lowerCAmelCase_ , methods=["""GET"""] , ), APIRoute( """/tokenize""" , self.tokenize , response_model=lowerCAmelCase_ , response_class=lowerCAmelCase_ , methods=["""POST"""] , ), APIRoute( """/detokenize""" , self.detokenize , response_model=lowerCAmelCase_ , response_class=lowerCAmelCase_ , methods=["""POST"""] , ), APIRoute( """/forward""" , self.forward , response_model=lowerCAmelCase_ , response_class=lowerCAmelCase_ , methods=["""POST"""] , ), ] , timeout=6_00 , ) def UpperCAmelCase ( self ) -> str: run(self._app , host=self.host , port=self.port , workers=self.workers ) def UpperCAmelCase ( self ) -> Union[str, Any]: return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) ) def UpperCAmelCase ( self , lowerCAmelCase_ = Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) , lowerCAmelCase_ = Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) ) -> List[Any]: try: _A = self._pipeline.tokenizer.tokenize(lowerCAmelCase_ ) if return_ids: _A = self._pipeline.tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) return ServeTokenizeResult(tokens=lowerCAmelCase_ , tokens_ids=lowerCAmelCase_ ) else: return ServeTokenizeResult(tokens=lowerCAmelCase_ ) except Exception as e: raise HTTPException(status_code=5_00 , detail={"""model""": """""", """error""": str(lowerCAmelCase_ )} ) def UpperCAmelCase ( self , lowerCAmelCase_ = Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) , lowerCAmelCase_ = Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) , lowerCAmelCase_ = Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) , ) -> Dict: try: _A = self._pipeline.tokenizer.decode(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) return ServeDeTokenizeResult(model="""""" , text=lowerCAmelCase_ ) except Exception as e: raise HTTPException(status_code=5_00 , detail={"""model""": """""", """error""": str(lowerCAmelCase_ )} ) async def UpperCAmelCase ( self , lowerCAmelCase_=Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) ) -> Any: # Check we don't have empty string if len(lowerCAmelCase_ ) == 0: return ServeForwardResult(output=[] , attention=[] ) try: # Forward through the model _A = self._pipeline(lowerCAmelCase_ ) return ServeForwardResult(output=lowerCAmelCase_ ) except Exception as e: raise HTTPException(5_00 , {"""error""": str(lowerCAmelCase_ )} )
81
1
# Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version lowerCAmelCase__ = get_logger(__name__) class a__ : """simple docstring""" __lowerCamelCase = 'dummy_data' __lowerCamelCase = 'datasets' __lowerCamelCase = False def __init__( self , lowercase , lowercase , lowercase , lowercase = None , lowercase = False , lowercase = True , lowercase = None , ) -> Dict: '''simple docstring''' A__ = 0 A__ = dataset_name A__ = cache_dir A__ = use_local_dummy_data A__ = config # download_callbacks take a single url as input A__ = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root A__ = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general A__ = str(lowercase ) # to be downloaded A__ = None A__ = None @property def UpperCamelCase ( self ) -> Dict: '''simple docstring''' if self._dummy_file is None: A__ = self.download_dummy_data() return self._dummy_file @property def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join("dummy" , self.version_name ) @property def UpperCamelCase ( self ) -> Any: '''simple docstring''' return os.path.join(self.dummy_data_folder , "dummy_data.zip" ) def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' A__ = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) A__ = cached_path( lowercase , cache_dir=self.cache_dir , extract_compressed_file=lowercase , force_extract=lowercase ) return os.path.join(lowercase , self.dummy_file_name ) @property def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def UpperCamelCase ( self ) -> int: '''simple docstring''' if self._bucket_url is None: A__ = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) ) return self._bucket_url @property def UpperCamelCase ( self ) -> Any: '''simple docstring''' if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] ) def UpperCamelCase ( self , lowercase , *lowercase ) -> Any: '''simple docstring''' if self.load_existing_dummy_data: # dummy data is downloaded and tested A__ = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned A__ = self.dummy_file_name # special case when data_url is a dict if isinstance(lowercase , lowercase ): return self.create_dummy_data_dict(lowercase , lowercase ) elif isinstance(lowercase , (list, tuple) ): return self.create_dummy_data_list(lowercase , lowercase ) else: return self.create_dummy_data_single(lowercase , lowercase ) def UpperCamelCase ( self , lowercase , *lowercase ) -> Dict: '''simple docstring''' return self.download_and_extract(lowercase ) def UpperCamelCase ( self , lowercase , lowercase ) -> Tuple: '''simple docstring''' return self.download_and_extract(lowercase ) def UpperCamelCase ( self , lowercase , *lowercase , **lowercase ) -> Optional[int]: '''simple docstring''' return path def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' return {} def UpperCamelCase ( self , lowercase , lowercase ) -> Optional[int]: '''simple docstring''' A__ = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(lowercase , lowercase ): for single_url in single_urls: download_callback(lowercase ) else: A__ = single_urls download_callback(lowercase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(lowercase , lowercase ): A__ = [os.path.join(lowercase , urllib.parse.quote_plus(Path(lowercase ).name ) ) for x in single_urls] else: A__ = single_urls A__ = os.path.join(lowercase , urllib.parse.quote_plus(Path(lowercase ).name ) ) A__ = value # make sure that values are unique if all(isinstance(lowercase , lowercase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique A__ = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def UpperCamelCase ( self , lowercase , lowercase ) -> Dict: '''simple docstring''' A__ = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one A__ = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , lowercase ) ) for url in data_url ) A__ = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): A__ = [data_url[0]] * len(lowercase ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(lowercase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus A__ = os.path.join(lowercase , urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(lowercase ) return dummy_data_list def UpperCamelCase ( self , lowercase , lowercase ) -> List[str]: '''simple docstring''' for download_callback in self.download_callbacks: download_callback(lowercase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus A__ = os.path.join(lowercase , urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(lowercase ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def UpperCamelCase ( self ) -> int: '''simple docstring''' pass def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' pass def UpperCamelCase ( self , lowercase ) -> Optional[Any]: '''simple docstring''' def _iter_archive_members(lowercase ): # this preserves the order of the members inside the ZIP archive A__ = Path(self.dummy_file ).parent A__ = path.relative_to(lowercase ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: A__ = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(lowercase ) A__ = Path(lowercase ) A__ = _iter_archive_members(lowercase ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(lowercase ).as_posix(), file_path.open("rb" ) def UpperCamelCase ( self , lowercase ) -> Dict: '''simple docstring''' if not isinstance(lowercase , lowercase ): A__ = [paths] for path in paths: if os.path.isfile(lowercase ): if os.path.basename(lowercase ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(lowercase ): if os.path.basename(lowercase ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(lowercase ): if filename.startswith((".", "__") ): continue yield os.path.join(lowercase , lowercase )
68
import sys def _A ( SCREAMING_SNAKE_CASE__ : List[str] ): UpperCamelCase :Any = len(SCREAMING_SNAKE_CASE__ ) UpperCamelCase :Any = [[0 for x in range(SCREAMING_SNAKE_CASE__ )] for x in range(SCREAMING_SNAKE_CASE__ )] UpperCamelCase :List[Any] = [[0 for x in range(SCREAMING_SNAKE_CASE__ )] for x in range(SCREAMING_SNAKE_CASE__ )] for chain_length in range(2 , SCREAMING_SNAKE_CASE__ ): for a in range(1 , n - chain_length + 1 ): UpperCamelCase :Optional[Any] = a + chain_length - 1 UpperCamelCase :int = sys.maxsize for c in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): UpperCamelCase :Any = ( matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b] ) if cost < matrix[a][b]: UpperCamelCase :int = cost UpperCamelCase :List[str] = c return matrix, sol def _A ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ): if i == j: print('''A''' + str(SCREAMING_SNAKE_CASE__ ) , end=''' ''' ) else: print('''(''' , end=''' ''' ) print_optiomal_solution(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , optimal_solution[i][j] ) print_optiomal_solution(SCREAMING_SNAKE_CASE__ , optimal_solution[i][j] + 1 , SCREAMING_SNAKE_CASE__ ) print(''')''' , end=''' ''' ) def _A ( ): UpperCamelCase :Optional[int] = [30, 35, 15, 5, 10, 20, 25] UpperCamelCase :Optional[Any] = len(SCREAMING_SNAKE_CASE__ ) # Size of matrix created from above array will be # 30*35 35*15 15*5 5*10 10*20 20*25 UpperCamelCase , UpperCamelCase :Dict = matrix_chain_order(SCREAMING_SNAKE_CASE__ ) print('''No. of Operation required: ''' + str(matrix[1][n - 1] ) ) print_optiomal_solution(SCREAMING_SNAKE_CASE__ , 1 , n - 1 ) if __name__ == "__main__": main()
259
0
'''simple docstring''' import argparse import torch from transformers import ( UniSpeechSatConfig, UniSpeechSatForAudioFrameClassification, UniSpeechSatForSequenceClassification, UniSpeechSatForXVector, WavaVecaFeatureExtractor, logging, ) logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger(__name__) def _A ( A__ , A__ , A__ ): """simple docstring""" __lowercase = UniSpeechSatForSequenceClassification.from_pretrained(snake_case_ , config=snake_case_ ) __lowercase = downstream_dict["""projector.weight"""] __lowercase = downstream_dict["""projector.bias"""] __lowercase = downstream_dict["""model.post_net.linear.weight"""] __lowercase = downstream_dict["""model.post_net.linear.bias"""] return model def _A ( A__ , A__ , A__ ): """simple docstring""" __lowercase = UniSpeechSatForAudioFrameClassification.from_pretrained(snake_case_ , config=snake_case_ ) __lowercase = downstream_dict["""model.linear.weight"""] __lowercase = downstream_dict["""model.linear.bias"""] return model def _A ( A__ , A__ , A__ ): """simple docstring""" __lowercase = UniSpeechSatForXVector.from_pretrained(snake_case_ , config=snake_case_ ) __lowercase = downstream_dict["""connector.weight"""] __lowercase = downstream_dict["""connector.bias"""] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): __lowercase = downstream_dict[ F"model.framelevel_feature_extractor.module.{i}.kernel.weight" ] __lowercase = downstream_dict[F"model.framelevel_feature_extractor.module.{i}.kernel.bias"] __lowercase = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""] __lowercase = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""] __lowercase = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""] __lowercase = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""] __lowercase = downstream_dict["""objective.W"""] return model @torch.no_grad() def _A ( A__ , A__ , A__ , A__ ): """simple docstring""" __lowercase = torch.load(snake_case_ , map_location='''cpu''' ) __lowercase = checkpoint["""Downstream"""] __lowercase = UniSpeechSatConfig.from_pretrained(snake_case_ ) __lowercase = WavaVecaFeatureExtractor.from_pretrained( snake_case_ , return_attention_mask=snake_case_ , do_normalize=snake_case_ ) __lowercase = hf_config.architectures[0] if arch.endswith('''ForSequenceClassification''' ): __lowercase = convert_classification(snake_case_ , snake_case_ , snake_case_ ) elif arch.endswith('''ForAudioFrameClassification''' ): __lowercase = convert_diarization(snake_case_ , snake_case_ , snake_case_ ) elif arch.endswith('''ForXVector''' ): __lowercase = convert_xvector(snake_case_ , snake_case_ , snake_case_ ) else: raise NotImplementedError(F"S3PRL weights conversion is not supported for {arch}" ) if hf_config.use_weighted_layer_sum: __lowercase = checkpoint["""Featurizer"""]["""weights"""] hf_feature_extractor.save_pretrained(snake_case_ ) hf_model.save_pretrained(snake_case_ ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument( '''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.''' ) parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''') parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''') lowerCAmelCase__ = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
355
'''simple docstring''' lowerCAmelCase__ = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ''' def _A ( ): """simple docstring""" __lowercase = input('''Enter message: ''' ) __lowercase = input('''Enter key [alphanumeric]: ''' ) __lowercase = input('''Encrypt/Decrypt [e/d]: ''' ) if mode.lower().startswith('''e''' ): __lowercase = '''encrypt''' __lowercase = encrypt_message(A__ , A__ ) elif mode.lower().startswith('''d''' ): __lowercase = '''decrypt''' __lowercase = decrypt_message(A__ , A__ ) print(F"\n{mode.title()}ed message:" ) print(A__ ) def _A ( A__ , A__ ): """simple docstring""" return translate_message(A__ , A__ , '''encrypt''' ) def _A ( A__ , A__ ): """simple docstring""" return translate_message(A__ , A__ , '''decrypt''' ) def _A ( A__ , A__ , A__ ): """simple docstring""" __lowercase = [] __lowercase = 0 __lowercase = key.upper() for symbol in message: __lowercase = LETTERS.find(symbol.upper() ) if num != -1: if mode == "encrypt": num += LETTERS.find(key[key_index] ) elif mode == "decrypt": num -= LETTERS.find(key[key_index] ) num %= len(A__ ) if symbol.isupper(): translated.append(LETTERS[num] ) elif symbol.islower(): translated.append(LETTERS[num].lower() ) key_index += 1 if key_index == len(A__ ): __lowercase = 0 else: translated.append(A__ ) return "".join(A__ ) if __name__ == "__main__": main()
52
0
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def __snake_case ( _UpperCAmelCase ): __a = 384 __a = 7 if "tiny" in model_name: __a = 96 __a = (2, 2, 6, 2) __a = (3, 6, 12, 24) elif "small" in model_name: __a = 96 __a = (2, 2, 18, 2) __a = (3, 6, 12, 24) elif "base" in model_name: __a = 128 __a = (2, 2, 18, 2) __a = (4, 8, 16, 32) __a = 12 __a = 512 elif "large" in model_name: __a = 192 __a = (2, 2, 18, 2) __a = (6, 12, 24, 48) __a = 12 __a = 768 # set label information __a = 150 __a = 'huggingface/label-files' __a = 'ade20k-id2label.json' __a = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='''dataset''' ) , '''r''' ) ) __a = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()} __a = {v: k for k, v in idalabel.items()} __a = SwinConfig( embed_dim=SCREAMING_SNAKE_CASE_ , depths=SCREAMING_SNAKE_CASE_ , num_heads=SCREAMING_SNAKE_CASE_ , window_size=SCREAMING_SNAKE_CASE_ , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , ) __a = UperNetConfig( backbone_config=SCREAMING_SNAKE_CASE_ , auxiliary_in_channels=SCREAMING_SNAKE_CASE_ , num_labels=SCREAMING_SNAKE_CASE_ , idalabel=SCREAMING_SNAKE_CASE_ , labelaid=SCREAMING_SNAKE_CASE_ , ) return config def __snake_case ( _UpperCAmelCase ): __a = [] # fmt: off # stem rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') ) rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') ) rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') ) rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias') ) if i < 3: rename_keys.append((f'backbone.stages.{i}.downsample.reduction.weight', f'backbone.encoder.layers.{i}.downsample.reduction.weight') ) rename_keys.append((f'backbone.stages.{i}.downsample.norm.weight', f'backbone.encoder.layers.{i}.downsample.norm.weight') ) rename_keys.append((f'backbone.stages.{i}.downsample.norm.bias', f'backbone.encoder.layers.{i}.downsample.norm.bias') ) rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') ) rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') ) # decode head rename_keys.extend( [ ('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''), ('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''), ('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''), ('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''), ] ) # fmt: on return rename_keys def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = dct.pop(SCREAMING_SNAKE_CASE_ ) __a = val def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): __a = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): __a = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) __a = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight' ) __a = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict __a = in_proj_weight[:dim, :] __a = in_proj_bias[: dim] __a = in_proj_weight[ dim : dim * 2, : ] __a = in_proj_bias[ dim : dim * 2 ] __a = in_proj_weight[ -dim :, : ] __a = in_proj_bias[-dim :] # fmt: on def __snake_case ( _UpperCAmelCase ): __a = x.shape __a = x.reshape(SCREAMING_SNAKE_CASE_ , 4 , in_channel // 4 ) __a = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return x def __snake_case ( _UpperCAmelCase ): __a = x.shape __a = x.reshape(SCREAMING_SNAKE_CASE_ , in_channel // 4 , 4 ) __a = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return x def __snake_case ( _UpperCAmelCase ): __a = x.shape[0] __a = x.reshape(4 , in_channel // 4 ) __a = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(SCREAMING_SNAKE_CASE_ ) return x def __snake_case ( _UpperCAmelCase ): __a = x.shape[0] __a = x.reshape(in_channel // 4 , 4 ) __a = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(SCREAMING_SNAKE_CASE_ ) return x def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = { 'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth', 'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth', 'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth', 'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth', } __a = model_name_to_url[model_name] __a = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location='''cpu''' , file_name=SCREAMING_SNAKE_CASE_ )[ 'state_dict' ] for name, param in state_dict.items(): print(SCREAMING_SNAKE_CASE_ , param.shape ) __a = get_upernet_config(SCREAMING_SNAKE_CASE_ ) __a = UperNetForSemanticSegmentation(SCREAMING_SNAKE_CASE_ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): __a = state_dict.pop(SCREAMING_SNAKE_CASE_ ) if "bn" in key: __a = key.replace('''bn''' , '''batch_norm''' ) __a = val # rename keys __a = create_rename_keys(SCREAMING_SNAKE_CASE_ ) for src, dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) read_in_q_k_v(SCREAMING_SNAKE_CASE_ , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: __a = reverse_correct_unfold_reduction_order(SCREAMING_SNAKE_CASE_ ) if "norm" in key: __a = reverse_correct_unfold_norm_order(SCREAMING_SNAKE_CASE_ ) model.load_state_dict(SCREAMING_SNAKE_CASE_ ) # verify on image __a = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg' __a = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ).convert('''RGB''' ) __a = SegformerImageProcessor() __a = processor(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).pixel_values with torch.no_grad(): __a = model(SCREAMING_SNAKE_CASE_ ) __a = outputs.logits print(logits.shape ) print('''First values of logits:''' , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": __a = torch.tensor( [[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]] ) elif model_name == "upernet-swin-small": __a = torch.tensor( [[-7.19_21, -7.19_21, -6.95_32], [-7.19_21, -7.19_21, -6.95_32], [-7.09_08, -7.09_08, -6.85_34]] ) elif model_name == "upernet-swin-base": __a = torch.tensor( [[-6.58_51, -6.58_51, -6.43_30], [-6.58_51, -6.58_51, -6.43_30], [-6.47_63, -6.47_63, -6.32_54]] ) elif model_name == "upernet-swin-large": __a = torch.tensor( [[-7.52_97, -7.52_97, -7.38_02], [-7.52_97, -7.52_97, -7.38_02], [-7.40_44, -7.40_44, -7.25_86]] ) print('''Logits:''' , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) print(f'Saving processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) if push_to_hub: print(f'Pushing model and processor for {model_name} to hub' ) model.push_to_hub(f'openmmlab/{model_name}' ) processor.push_to_hub(f'openmmlab/{model_name}' ) if __name__ == "__main__": __snake_case :List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''upernet-swin-tiny''', type=str, choices=[f'upernet-swin-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''']], help='''Name of the Swin + UperNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) __snake_case :List[str] = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
49
import unittest from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCamelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class A__ ( __magic_name__ , unittest.TestCase ): lowercase = ReformerTokenizer lowercase = ReformerTokenizerFast lowercase = True lowercase = False lowercase = True def _lowerCamelCase ( self : Dict ): '''simple docstring''' super().setUp() lowerCAmelCase__ : int = ReformerTokenizer(a , keep_accents=a ) tokenizer.save_pretrained(self.tmpdirname ) def _lowerCamelCase ( self : str ): '''simple docstring''' lowerCAmelCase__ : int = '<s>' lowerCAmelCase__ : Optional[int] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a ) def _lowerCamelCase ( self : str ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<unk>' ) self.assertEqual(vocab_keys[1] , '<s>' ) self.assertEqual(vocab_keys[-1] , 'j' ) self.assertEqual(len(a ) , 1_000 ) def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_000 ) def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' if not self.test_rust_tokenizer: return lowerCAmelCase__ : Optional[int] = self.get_tokenizer() lowerCAmelCase__ : Any = self.get_rust_tokenizer() lowerCAmelCase__ : Optional[int] = 'I was born in 92000, and this is falsé.' lowerCAmelCase__ : List[Any] = tokenizer.tokenize(a ) lowerCAmelCase__ : Optional[int] = rust_tokenizer.tokenize(a ) self.assertListEqual(a , a ) lowerCAmelCase__ : List[str] = tokenizer.encode(a , add_special_tokens=a ) lowerCAmelCase__ : Any = rust_tokenizer.encode(a , add_special_tokens=a ) self.assertListEqual(a , a ) lowerCAmelCase__ : Any = self.get_rust_tokenizer() lowerCAmelCase__ : List[str] = tokenizer.encode(a ) lowerCAmelCase__ : Optional[int] = rust_tokenizer.encode(a ) self.assertListEqual(a , a ) def _lowerCamelCase ( self : Optional[Any] , a : Union[str, Any]=15 ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowerCAmelCase__ : List[str] = self.rust_tokenizer_class.from_pretrained(a , **a ) # Simple input lowerCAmelCase__ : Any = 'This is a simple input' lowerCAmelCase__ : str = ['This is a simple input 1', 'This is a simple input 2'] lowerCAmelCase__ : Optional[int] = ('This is a simple input', 'This is a pair') lowerCAmelCase__ : Optional[int] = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(a , tokenizer_r.encode , a , max_length=a , padding='max_length' ) # Simple input self.assertRaises(a , tokenizer_r.encode_plus , a , max_length=a , padding='max_length' ) # Simple input self.assertRaises( a , tokenizer_r.batch_encode_plus , a , max_length=a , padding='max_length' , ) # Pair input self.assertRaises(a , tokenizer_r.encode , a , max_length=a , padding='max_length' ) # Pair input self.assertRaises(a , tokenizer_r.encode_plus , a , max_length=a , padding='max_length' ) # Pair input self.assertRaises( a , tokenizer_r.batch_encode_plus , a , max_length=a , padding='max_length' , ) def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' pass def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = ReformerTokenizer(a , keep_accents=a ) lowerCAmelCase__ : Dict = tokenizer.tokenize('This is a test' ) self.assertListEqual(a , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(a ) , [285, 46, 10, 170, 382] , ) lowerCAmelCase__ : List[str] = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( a , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) lowerCAmelCase__ : int = tokenizer.convert_tokens_to_ids(a ) self.assertListEqual( a , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) lowerCAmelCase__ : str = tokenizer.convert_ids_to_tokens(a ) self.assertListEqual( a , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) @cached_property def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' return ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment' ) @slow def _lowerCamelCase ( self : Any ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = 'Hello World!' lowerCAmelCase__ : int = [126, 32, 262, 152, 38, 72, 287] self.assertListEqual(a , self.big_tokenizer.encode(a ) ) @slow def _lowerCamelCase ( self : int ): '''simple docstring''' lowerCAmelCase__ : int = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will' ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth' ) lowerCAmelCase__ : int = [ 108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268, 21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319, 258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259, 241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26, 0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265, ] self.assertListEqual(a , self.big_tokenizer.encode(a ) ) @require_torch @slow def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' import torch from transformers import ReformerConfig, ReformerModel # Build sequence lowerCAmelCase__ : Dict = list(self.big_tokenizer.get_vocab().keys() )[:10] lowerCAmelCase__ : Optional[Any] = ' '.join(a ) lowerCAmelCase__ : Any = self.big_tokenizer.encode_plus(a , return_tensors='pt' ) lowerCAmelCase__ : Optional[int] = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='pt' ) lowerCAmelCase__ : List[Any] = ReformerConfig() # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024) lowerCAmelCase__ : int = encoded_sequence['input_ids'].shape lowerCAmelCase__ : Union[str, Any] = ReformerModel(a ) # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**a ) model(**a ) @slow def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # This tokenizer does not know some characters like ")". # That is the reason why we use very simple texts here. # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064 lowerCAmelCase__ : List[str] = [ 'This is a very simple sentence.', 'The quick brown fox jumps over the lazy dog.', ] self.tokenizer_integration_test_util( expected_encoding=a , model_name='google/reformer-crime-and-punishment' , revision='0e6c3decb8211d49bf881013425dc8b0448b3f5a' , padding=a , sequences=a , )
212
0
"""simple docstring""" import argparse from pathlib import Path import fairseq import torch from fairseq.models.xmod import XMODModel as FairseqXmodModel from packaging import version from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("0.12.2"): raise Exception("requires fairseq >= 0.12.2") if version.parse(fairseq.__version__) > version.parse("2"): raise Exception("requires fairseq < v2") logging.set_verbosity_info() A : int = logging.get_logger(__name__) A : List[str] = "Hello, World!" A : Optional[int] = "en_XX" def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = Path("data_bin" ) __lowerCAmelCase = FairseqXmodModel.from_pretrained( model_name_or_path=str(Path(_UpperCamelCase ).parent ) , checkpoint_file=Path(_UpperCamelCase ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(_UpperCamelCase ) , bpe="sentencepiece" , sentencepiece_model=str(Path(_UpperCamelCase ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , ) xmod.eval() # disable dropout print(_UpperCamelCase ) __lowerCAmelCase = xmod.model.encoder.sentence_encoder __lowerCAmelCase = XmodConfig( vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , ) if classification_head: __lowerCAmelCase = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0] print("Our X-MOD config:" , _UpperCamelCase ) __lowerCAmelCase = XmodForSequenceClassification(_UpperCamelCase ) if classification_head else XmodForMaskedLM(_UpperCamelCase ) model.eval() # Now let's copy all the weights. # Embeddings __lowerCAmelCase = xmod_sent_encoder.embed_tokens.weight __lowerCAmelCase = xmod_sent_encoder.embed_positions.weight __lowerCAmelCase = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them. __lowerCAmelCase = xmod_sent_encoder.layernorm_embedding.weight __lowerCAmelCase = xmod_sent_encoder.layernorm_embedding.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer __lowerCAmelCase = model.roberta.encoder.layer[i] __lowerCAmelCase = xmod_sent_encoder.layers[i] # self attention __lowerCAmelCase = layer.attention.self if not ( xmod_layer.self_attn.k_proj.weight.data.shape == xmod_layer.self_attn.q_proj.weight.data.shape == xmod_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ): raise AssertionError("Dimensions of self-attention weights do not match." ) __lowerCAmelCase = xmod_layer.self_attn.q_proj.weight __lowerCAmelCase = xmod_layer.self_attn.q_proj.bias __lowerCAmelCase = xmod_layer.self_attn.k_proj.weight __lowerCAmelCase = xmod_layer.self_attn.k_proj.bias __lowerCAmelCase = xmod_layer.self_attn.v_proj.weight __lowerCAmelCase = xmod_layer.self_attn.v_proj.bias # self-attention output __lowerCAmelCase = layer.attention.output if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape: raise AssertionError("Dimensions of self-attention output weights do not match." ) __lowerCAmelCase = xmod_layer.self_attn.out_proj.weight __lowerCAmelCase = xmod_layer.self_attn.out_proj.bias __lowerCAmelCase = xmod_layer.self_attn_layer_norm.weight __lowerCAmelCase = xmod_layer.self_attn_layer_norm.bias # intermediate __lowerCAmelCase = layer.intermediate if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError("Dimensions of intermediate weights do not match." ) __lowerCAmelCase = xmod_layer.fca.weight __lowerCAmelCase = xmod_layer.fca.bias # output __lowerCAmelCase = layer.output if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError("Dimensions of feed-forward weights do not match." ) __lowerCAmelCase = xmod_layer.fca.weight __lowerCAmelCase = xmod_layer.fca.bias __lowerCAmelCase = xmod_layer.final_layer_norm.weight __lowerCAmelCase = xmod_layer.final_layer_norm.bias if bert_output.adapter_layer_norm is not None: __lowerCAmelCase = xmod_layer.adapter_layer_norm.weight __lowerCAmelCase = xmod_layer.adapter_layer_norm.bias if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ): raise AssertionError("Lists of language adapters do not match." ) for lang_code, adapter in xmod_layer.adapter_modules.items(): __lowerCAmelCase = bert_output.adapter_modules[lang_code] __lowerCAmelCase = xmod_layer.adapter_modules[lang_code] __lowerCAmelCase = from_adapter.fca.weight __lowerCAmelCase = from_adapter.fca.bias __lowerCAmelCase = from_adapter.fca.weight __lowerCAmelCase = from_adapter.fca.bias # end of layer if xmod_sent_encoder.layer_norm is not None: __lowerCAmelCase = xmod_sent_encoder.layer_norm.weight __lowerCAmelCase = xmod_sent_encoder.layer_norm.bias if classification_head: __lowerCAmelCase = xmod.model.classification_heads["mnli"].dense.weight __lowerCAmelCase = xmod.model.classification_heads["mnli"].dense.bias __lowerCAmelCase = xmod.model.classification_heads["mnli"].out_proj.weight __lowerCAmelCase = xmod.model.classification_heads["mnli"].out_proj.bias else: # LM Head __lowerCAmelCase = xmod.model.encoder.lm_head.dense.weight __lowerCAmelCase = xmod.model.encoder.lm_head.dense.bias __lowerCAmelCase = xmod.model.encoder.lm_head.layer_norm.weight __lowerCAmelCase = xmod.model.encoder.lm_head.layer_norm.bias __lowerCAmelCase = xmod.model.encoder.lm_head.weight __lowerCAmelCase = xmod.model.encoder.lm_head.bias # Let's check that we get the same results. __lowerCAmelCase = xmod.encode(_UpperCamelCase ).unsqueeze(0 ) # batch of size 1 model.roberta.set_default_language(_UpperCamelCase ) __lowerCAmelCase = model(_UpperCamelCase )[0] if classification_head: __lowerCAmelCase = xmod.model.classification_heads["mnli"](xmod.extract_features(_UpperCamelCase ) ) else: __lowerCAmelCase = xmod.model(_UpperCamelCase , lang_id=[SAMPLE_LANGUAGE] )[0] print(our_output.shape , their_output.shape ) __lowerCAmelCase = torch.max(torch.abs(our_output - their_output ) ).item() print(f"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7 __lowerCAmelCase = torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) print("Do both models output the same tensors?" , "🔥" if success else "💩" ) if not success: raise Exception("Something went wRoNg" ) Path(_UpperCamelCase ).mkdir(parents=_UpperCamelCase , exist_ok=_UpperCamelCase ) print(f"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": A : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--classification_head", action="store_true", help="Whether to convert a final classification head." ) A : Tuple = parser.parse_args() convert_xmod_checkpoint_to_pytorch( args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
259
"""simple docstring""" from typing import Union import fire import torch from tqdm import tqdm def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase = "cpu" , _UpperCamelCase = None ): '''simple docstring''' __lowerCAmelCase = torch.load(_UpperCamelCase , map_location=_UpperCamelCase ) for k, v in tqdm(state_dict.items() ): if not isinstance(_UpperCamelCase , torch.Tensor ): raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" ) __lowerCAmelCase = v.half() if save_path is None: # overwrite src_path __lowerCAmelCase = src_path torch.save(_UpperCamelCase , _UpperCamelCase ) if __name__ == "__main__": fire.Fire(convert)
259
1
import pprint import requests __lowerCamelCase : Tuple = """https://zenquotes.io/api""" def A_ ( ) -> list: return requests.get(API_ENDPOINT_URL + "/today" ).json() def A_ ( ) -> list: return requests.get(API_ENDPOINT_URL + "/random" ).json() if __name__ == "__main__": __lowerCamelCase : Optional[int] = random_quotes() pprint.pprint(response)
52
from abc import ABC, abstractmethod from argparse import ArgumentParser class _SCREAMING_SNAKE_CASE ( snake_case_ ): @staticmethod @abstractmethod def SCREAMING_SNAKE_CASE_( lowercase ) -> int: raise NotImplementedError() @abstractmethod def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]: raise NotImplementedError()
19
0
'''simple docstring''' from importlib import import_module from .logging import get_logger __lowerCAmelCase = get_logger(__name__) class __magic_name__ : def __init__( self : str ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : Any=None ): _a : int = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith('__' ): setattr(self ,_UpperCAmelCase ,getattr(_UpperCAmelCase ,_UpperCAmelCase ) ) _a : Tuple = module._original_module if isinstance(_UpperCAmelCase ,_PatchedModuleObj ) else module class __magic_name__ : lowerCAmelCase : List[str] = [] def __init__( self : str ,_UpperCAmelCase : Dict ,_UpperCAmelCase : int ,_UpperCAmelCase : int ,_UpperCAmelCase : Union[str, Any]=None ): _a : str = obj _a : str = target _a : str = new _a : Any = target.split('.' )[0] _a : Optional[int] = {} _a : Union[str, Any] = attrs or [] def __enter__( self : str ): *_a , _a : Any = self.target.split('.' ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(_UpperCAmelCase ) ): try: _a : Any = import_module('.'.join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): _a : Any = getattr(self.obj ,_UpperCAmelCase ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(_UpperCAmelCase ,_PatchedModuleObj ) and obj_attr._original_module is submodule) ): _a : List[Any] = obj_attr # patch at top level setattr(self.obj ,_UpperCAmelCase ,_PatchedModuleObj(_UpperCAmelCase ,attrs=self.attrs ) ) _a : str = getattr(self.obj ,_UpperCAmelCase ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(_UpperCAmelCase ,_UpperCAmelCase ,_PatchedModuleObj(getattr(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) ,attrs=self.attrs ) ) _a : Any = getattr(_UpperCAmelCase ,_UpperCAmelCase ) # finally set the target attribute setattr(_UpperCAmelCase ,_UpperCAmelCase ,self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: _a : Union[str, Any] = getattr(import_module('.'.join(_UpperCAmelCase ) ) ,_UpperCAmelCase ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". if getattr(self.obj ,_UpperCAmelCase ) is attr_value: _a : Optional[int] = getattr(self.obj ,_UpperCAmelCase ) setattr(self.obj ,_UpperCAmelCase ,self.new ) elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open" _a : Tuple = globals()['__builtins__'][target_attr] setattr(self.obj ,_UpperCAmelCase ,self.new ) else: raise RuntimeError(F"""Tried to patch attribute {target_attr} instead of a submodule.""" ) def __exit__( self : Any ,*_UpperCAmelCase : Tuple ): for attr in list(self.original ): setattr(self.obj ,_UpperCAmelCase ,self.original.pop(_UpperCAmelCase ) ) def __lowercase ( self : Dict ): self.__enter__() self._active_patches.append(self ) def __lowercase ( self : Tuple ): try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
366
'''simple docstring''' from __future__ import annotations import collections import pprint from pathlib import Path def __lowerCamelCase ( lowerCAmelCase_ ) -> str: return "".join(sorted(lowerCAmelCase_ ) ) def __lowerCamelCase ( lowerCAmelCase_ ) -> list[str]: return word_by_signature[signature(lowerCAmelCase_ )] __lowerCAmelCase = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''') __lowerCAmelCase = sorted({word.strip().lower() for word in data.splitlines()}) __lowerCAmelCase = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": __lowerCAmelCase = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open('''anagrams.txt''', '''w''') as file: file.write('''all_anagrams = \n ''') file.write(pprint.pformat(all_anagrams))
107
0
def _a ( lowerCamelCase: int ) -> list: '''simple docstring''' __A = int(lowerCamelCase ) if n_element < 1: __A = ValueError('''a should be a positive number''' ) raise my_error __A = [1] __A , __A , __A = (0, 0, 0) __A = 1 while index < n_element: while hamming_list[i] * 2 <= hamming_list[-1]: i += 1 while hamming_list[j] * 3 <= hamming_list[-1]: j += 1 while hamming_list[k] * 5 <= hamming_list[-1]: k += 1 hamming_list.append( min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) ) index += 1 return hamming_list if __name__ == "__main__": snake_case__ : str = input('Enter the last number (nth term) of the Hamming Number Series: ') print('Formula of Hamming Number Series => 2^i * 3^j * 5^k') snake_case__ : List[str] = hamming(int(n)) print('-----------------------------------------------------') print(f'The list with nth numbers is: {hamming_numbers}') print('-----------------------------------------------------')
117
import os snake_case__ : Any = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000} def _a ( lowerCamelCase: str ) -> int: '''simple docstring''' __A = 0 __A = 0 while index < len(lowerCamelCase ) - 1: __A = SYMBOLS[numerals[index]] __A = SYMBOLS[numerals[index + 1]] if current_value < next_value: total_value -= current_value else: total_value += current_value index += 1 total_value += SYMBOLS[numerals[index]] return total_value def _a ( lowerCamelCase: int ) -> str: '''simple docstring''' __A = '''''' __A = num // 10_00 numerals += m_count * "M" num %= 10_00 __A = num // 1_00 if c_count == 9: numerals += "CM" c_count -= 9 elif c_count == 4: numerals += "CD" c_count -= 4 if c_count >= 5: numerals += "D" c_count -= 5 numerals += c_count * "C" num %= 1_00 __A = num // 10 if x_count == 9: numerals += "XC" x_count -= 9 elif x_count == 4: numerals += "XL" x_count -= 4 if x_count >= 5: numerals += "L" x_count -= 5 numerals += x_count * "X" num %= 10 if num == 9: numerals += "IX" num -= 9 elif num == 4: numerals += "IV" num -= 4 if num >= 5: numerals += "V" num -= 5 numerals += num * "I" return numerals def _a ( lowerCamelCase: str = "/p089_roman.txt" ) -> int: '''simple docstring''' __A = 0 with open(os.path.dirname(lowerCamelCase ) + roman_numerals_filename ) as filea: __A = filea.readlines() for line in lines: __A = line.strip() __A = parse_roman_numerals(lowerCamelCase ) __A = generate_roman_numerals(lowerCamelCase ) savings += len(lowerCamelCase ) - len(lowerCamelCase ) return savings if __name__ == "__main__": print(f'{solution() = }')
117
1
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def __init__( self : List[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple=13 , _lowerCamelCase : List[str]=3 , _lowerCamelCase : List[Any]=224 , _lowerCamelCase : Tuple=30 , _lowerCamelCase : Union[str, Any]=400 , _lowerCamelCase : List[str]=True , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Tuple=True , _lowerCamelCase : Any=[0.5, 0.5, 0.5] , _lowerCamelCase : Optional[int]=[0.5, 0.5, 0.5] , ): """simple docstring""" A_ : Optional[int] = size if size is not None else {'''height''': 18, '''width''': 18} A_ : str = parent A_ : Any = batch_size A_ : Union[str, Any] = num_channels A_ : str = image_size A_ : Union[str, Any] = min_resolution A_ : Dict = max_resolution A_ : Any = do_resize A_ : str = size A_ : int = do_normalize A_ : Optional[int] = image_mean A_ : List[str] = image_std def _a ( self : Tuple ): """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = ViTImageProcessor if is_vision_available() else None def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Tuple = EfficientFormerImageProcessorTester(self ) @property def _a ( self : Any ): """simple docstring""" return self.image_proc_tester.prepare_image_processor_dict() def _a ( self : int ): """simple docstring""" A_ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCamelCase , '''image_mean''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''image_std''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''do_resize''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''size''' ) ) def _a ( self : Tuple ): """simple docstring""" pass def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A_ : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , Image.Image ) # Test not batched input A_ : int = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) # Test batched A_ : Optional[int] = image_processor(_lowerCamelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) def _a ( self : Any ): """simple docstring""" A_ : str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A_ : Tuple = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , np.ndarray ) # Test not batched input A_ : Optional[int] = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) # Test batched A_ : Dict = image_processor(_lowerCamelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) def _a ( self : Optional[int] ): """simple docstring""" A_ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A_ : int = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , torch.Tensor ) # Test not batched input A_ : int = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) # Test batched A_ : Optional[Any] = image_processor(_lowerCamelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , )
4
'''simple docstring''' from __future__ import annotations from PIL import Image # Define glider example snake_case__ = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example snake_case__ = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def snake_case__ ( lowerCamelCase__ : list[list[int]] ) -> list[list[int]]: A_ : str = [] for i in range(len(lowerCamelCase__ ) ): A_ : Optional[Any] = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours A_ : Optional[int] = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(lowerCamelCase__ ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(lowerCamelCase__ ) - 1: neighbour_count += cells[i + 1][j] if i < len(lowerCamelCase__ ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. A_ : List[str] = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(lowerCamelCase__ ) return next_generation def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int ) -> list[Image.Image]: A_ : List[Any] = [] for _ in range(lowerCamelCase__ ): # Create output image A_ : Optional[int] = Image.new('''RGB''' , (len(cells[0] ), len(lowerCamelCase__ )) ) A_ : int = img.load() # Save cells to image for x in range(len(lowerCamelCase__ ) ): for y in range(len(cells[0] ) ): A_ : Optional[Any] = 2_5_5 - cells[y][x] * 2_5_5 A_ : str = (colour, colour, colour) # Save image images.append(lowerCamelCase__ ) A_ : Optional[int] = new_generation(lowerCamelCase__ ) return images if __name__ == "__main__": snake_case__ = generate_images(GLIDER, 16) images[0].save("""out.gif""", save_all=True, append_images=images[1:])
4
1
import argparse import os import re import tensorflow as tf import torch from transformers import BertConfig, BertModel from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE_ ( __A : Any , __A : int , __A : Optional[int] ) -> Tuple: """simple docstring""" a_ : Tuple = os.path.abspath(__A ) logger.info(F"""Converting TensorFlow checkpoint from {tf_path}""" ) # Load weights from TF model a_ : List[str] = tf.train.list_variables(__A ) a_ : Dict = [] a_ : str = [] a_ : List[Any] = [] for full_name, shape in init_vars: # logger.info(f"Loading TF weight {name} with shape {shape}") a_ : Union[str, Any] = full_name.split('/' ) if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]: logger.info(F"""Skipping non-model layer {full_name}""" ) continue if "optimizer" in full_name: logger.info(F"""Skipping optimization layer {full_name}""" ) continue if name[0] == "model": # ignore initial 'model' a_ : Dict = name[1:] # figure out how many levels deep the name is a_ : str = 0 for _name in name: if _name.startswith('layer_with_weights' ): depth += 1 else: break layer_depth.append(__A ) # read data a_ : Any = tf.train.load_variable(__A , __A ) names.append('/'.join(__A ) ) arrays.append(__A ) logger.info(F"""Read a total of {len(__A ):,} layers""" ) # Sanity check if len(set(__A ) ) != 1: raise ValueError(F"""Found layer names with different depths (layer depth {list(set(__A ) )})""" ) a_ : Union[str, Any] = list(set(__A ) )[0] if layer_depth != 1: raise ValueError( 'The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP' ' heads.' ) # convert layers logger.info('Converting weights...' ) for full_name, array in zip(__A , __A ): a_ : List[str] = full_name.split('/' ) a_ : List[str] = model a_ : int = [] for i, m_name in enumerate(__A ): if m_name == ".ATTRIBUTES": # variable names end with .ATTRIBUTES/VARIABLE_VALUE break if m_name.startswith('layer_with_weights' ): a_ : Optional[Any] = int(m_name.split('-' )[-1] ) if layer_num <= 2: # embedding layers # layer_num 0: word_embeddings # layer_num 1: position_embeddings # layer_num 2: token_type_embeddings continue elif layer_num == 3: # embedding LayerNorm trace.extend(['embeddings', 'LayerNorm'] ) a_ : List[str] = getattr(__A , 'embeddings' ) a_ : Any = getattr(__A , 'LayerNorm' ) elif layer_num > 3 and layer_num < config.num_hidden_layers + 4: # encoder layers trace.extend(['encoder', 'layer', str(layer_num - 4 )] ) a_ : Optional[int] = getattr(__A , 'encoder' ) a_ : Union[str, Any] = getattr(__A , 'layer' ) a_ : List[str] = pointer[layer_num - 4] elif layer_num == config.num_hidden_layers + 4: # pooler layer trace.extend(['pooler', 'dense'] ) a_ : str = getattr(__A , 'pooler' ) a_ : List[Any] = getattr(__A , 'dense' ) elif m_name == "embeddings": trace.append('embeddings' ) a_ : Optional[int] = getattr(__A , 'embeddings' ) if layer_num == 0: trace.append('word_embeddings' ) a_ : int = getattr(__A , 'word_embeddings' ) elif layer_num == 1: trace.append('position_embeddings' ) a_ : List[str] = getattr(__A , 'position_embeddings' ) elif layer_num == 2: trace.append('token_type_embeddings' ) a_ : str = getattr(__A , 'token_type_embeddings' ) else: raise ValueError(F"""Unknown embedding layer with name {full_name}""" ) trace.append('weight' ) a_ : Any = getattr(__A , 'weight' ) elif m_name == "_attention_layer": # self-attention layer trace.extend(['attention', 'self'] ) a_ : Dict = getattr(__A , 'attention' ) a_ : Optional[int] = getattr(__A , 'self' ) elif m_name == "_attention_layer_norm": # output attention norm trace.extend(['attention', 'output', 'LayerNorm'] ) a_ : Optional[int] = getattr(__A , 'attention' ) a_ : Dict = getattr(__A , 'output' ) a_ : Any = getattr(__A , 'LayerNorm' ) elif m_name == "_attention_output_dense": # output attention dense trace.extend(['attention', 'output', 'dense'] ) a_ : Optional[int] = getattr(__A , 'attention' ) a_ : int = getattr(__A , 'output' ) a_ : List[Any] = getattr(__A , 'dense' ) elif m_name == "_output_dense": # output dense trace.extend(['output', 'dense'] ) a_ : Any = getattr(__A , 'output' ) a_ : Any = getattr(__A , 'dense' ) elif m_name == "_output_layer_norm": # output dense trace.extend(['output', 'LayerNorm'] ) a_ : Tuple = getattr(__A , 'output' ) a_ : Any = getattr(__A , 'LayerNorm' ) elif m_name == "_key_dense": # attention key trace.append('key' ) a_ : Optional[int] = getattr(__A , 'key' ) elif m_name == "_query_dense": # attention query trace.append('query' ) a_ : Tuple = getattr(__A , 'query' ) elif m_name == "_value_dense": # attention value trace.append('value' ) a_ : Any = getattr(__A , 'value' ) elif m_name == "_intermediate_dense": # attention intermediate dense trace.extend(['intermediate', 'dense'] ) a_ : Any = getattr(__A , 'intermediate' ) a_ : Optional[int] = getattr(__A , 'dense' ) elif m_name == "_output_layer_norm": # output layer norm trace.append('output' ) a_ : Optional[int] = getattr(__A , 'output' ) # weights & biases elif m_name in ["bias", "beta"]: trace.append('bias' ) a_ : Any = getattr(__A , 'bias' ) elif m_name in ["kernel", "gamma"]: trace.append('weight' ) a_ : str = getattr(__A , 'weight' ) else: logger.warning(F"""Ignored {m_name}""" ) # for certain layers reshape is necessary a_ : Union[str, Any] = '.'.join(__A ) if re.match(R'(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)' , __A ) or re.match( R'(\S+)\.attention\.output\.dense\.weight' , __A ): a_ : Dict = array.reshape(pointer.data.shape ) if "kernel" in full_name: a_ : Optional[Any] = array.transpose() if pointer.shape == array.shape: a_ : Tuple = torch.from_numpy(__A ) else: raise ValueError( F"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:""" F""" {array.shape}""" ) logger.info(F"""Successfully set variable {full_name} to PyTorch layer {trace}""" ) return model def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] , __A : Optional[int] , __A : List[str] ) -> List[Any]: """simple docstring""" logger.info(F"""Loading model based on config from {config_path}...""" ) a_ : str = BertConfig.from_json_file(__A ) a_ : Optional[Any] = BertModel(__A ) # Load weights from checkpoint logger.info(F"""Loading weights from checkpoint {tf_checkpoint_path}...""" ) load_tfa_weights_in_bert(__A , __A , __A ) # Save pytorch-model logger.info(F"""Saving PyTorch model to {pytorch_dump_path}...""" ) torch.save(model.state_dict() , __A ) if __name__ == "__main__": UpperCAmelCase_ : Any = argparse.ArgumentParser() parser.add_argument( '--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.' ) parser.add_argument( '--bert_config_file', type=str, required=True, help='The config json file corresponding to the BERT model. This specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', type=str, required=True, help='Path to the output PyTorch model (must include filename).', ) UpperCAmelCase_ : Tuple = parser.parse_args() convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
32
import warnings from ...utils import logging from .image_processing_deit import DeiTImageProcessor __A : Dict = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )-> None: warnings.warn( """The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use DeiTImageProcessor instead.""" , _SCREAMING_SNAKE_CASE , ) super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
154
0
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class _UpperCAmelCase ( _UpperCamelCase ): """simple docstring""" a_ = (UniPCMultistepScheduler,) a_ = (("""num_inference_steps""", 25),) def lowercase ( self : Tuple , **lowerCAmelCase_ : Any ) -> int: __lowerCAmelCase = { 'num_train_timesteps': 1_0_0_0, 'beta_start': 0.00_01, 'beta_end': 0.02, 'beta_schedule': 'linear', 'solver_order': 2, 'solver_type': 'bh2', } config.update(**lowerCAmelCase_ ) return config def lowercase ( self : int , lowerCAmelCase_ : Optional[int]=0 , **lowerCAmelCase_ : Any ) -> List[Any]: __lowerCAmelCase = dict(self.forward_default_kwargs ) __lowerCAmelCase = kwargs.pop('num_inference_steps' , lowerCAmelCase_ ) __lowerCAmelCase = self.dummy_sample __lowerCAmelCase = 0.1 * sample __lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: __lowerCAmelCase = self.get_scheduler_config(**lowerCAmelCase_ ) __lowerCAmelCase = scheduler_class(**lowerCAmelCase_ ) scheduler.set_timesteps(lowerCAmelCase_ ) # copy over dummy past residuals __lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowerCAmelCase_ ) __lowerCAmelCase = scheduler_class.from_pretrained(lowerCAmelCase_ ) new_scheduler.set_timesteps(lowerCAmelCase_ ) # copy over dummy past residuals __lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] __lowerCAmelCase , __lowerCAmelCase = sample, sample for t in range(lowerCAmelCase_ , time_step + scheduler.config.solver_order + 1 ): __lowerCAmelCase = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ).prev_sample __lowerCAmelCase = new_scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : str=0 , **lowerCAmelCase_ : Dict ) -> Tuple: __lowerCAmelCase = dict(self.forward_default_kwargs ) __lowerCAmelCase = kwargs.pop('num_inference_steps' , lowerCAmelCase_ ) __lowerCAmelCase = self.dummy_sample __lowerCAmelCase = 0.1 * sample __lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**lowerCAmelCase_ ) scheduler.set_timesteps(lowerCAmelCase_ ) # copy over dummy past residuals (must be after setting timesteps) __lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowerCAmelCase_ ) __lowerCAmelCase = scheduler_class.from_pretrained(lowerCAmelCase_ ) # copy over dummy past residuals new_scheduler.set_timesteps(lowerCAmelCase_ ) # copy over dummy past residual (must be after setting timesteps) __lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] __lowerCAmelCase = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ).prev_sample __lowerCAmelCase = new_scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowercase ( self : Optional[Any] , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : Tuple ) -> List[str]: if scheduler is None: __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config(**lowerCAmelCase_ ) __lowerCAmelCase = scheduler_class(**lowerCAmelCase_ ) __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config(**lowerCAmelCase_ ) __lowerCAmelCase = scheduler_class(**lowerCAmelCase_ ) __lowerCAmelCase = 1_0 __lowerCAmelCase = self.dummy_model() __lowerCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(lowerCAmelCase_ ) for i, t in enumerate(scheduler.timesteps ): __lowerCAmelCase = model(lowerCAmelCase_ , lowerCAmelCase_ ) __lowerCAmelCase = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).prev_sample return sample def lowercase ( self : Tuple ) -> Tuple: __lowerCAmelCase = dict(self.forward_default_kwargs ) __lowerCAmelCase = kwargs.pop('num_inference_steps' , lowerCAmelCase_ ) for scheduler_class in self.scheduler_classes: __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**lowerCAmelCase_ ) __lowerCAmelCase = self.dummy_sample __lowerCAmelCase = 0.1 * sample if num_inference_steps is not None and hasattr(lowerCAmelCase_ , 'set_timesteps' ): scheduler.set_timesteps(lowerCAmelCase_ ) elif num_inference_steps is not None and not hasattr(lowerCAmelCase_ , 'set_timesteps' ): __lowerCAmelCase = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) __lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] __lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] __lowerCAmelCase = scheduler.timesteps[5] __lowerCAmelCase = scheduler.timesteps[6] __lowerCAmelCase = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ).prev_sample __lowerCAmelCase = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowercase ( self : int ) -> int: # make sure that iterating over schedulers with same config names gives same results # for defaults __lowerCAmelCase = UniPCMultistepScheduler(**self.get_scheduler_config() ) __lowerCAmelCase = self.full_loop(scheduler=lowerCAmelCase_ ) __lowerCAmelCase = torch.mean(torch.abs(lowerCAmelCase_ ) ) assert abs(result_mean.item() - 0.24_64 ) < 1e-3 __lowerCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) __lowerCAmelCase = DEISMultistepScheduler.from_config(scheduler.config ) __lowerCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) __lowerCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config ) __lowerCAmelCase = self.full_loop(scheduler=lowerCAmelCase_ ) __lowerCAmelCase = torch.mean(torch.abs(lowerCAmelCase_ ) ) assert abs(result_mean.item() - 0.24_64 ) < 1e-3 def lowercase ( self : Optional[Any] ) -> int: for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_configs(num_train_timesteps=lowerCAmelCase_ ) def lowercase ( self : Dict ) -> int: self.check_over_configs(thresholding=lowerCAmelCase_ ) for order in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=lowerCAmelCase_ , prediction_type=lowerCAmelCase_ , sample_max_value=lowerCAmelCase_ , solver_order=lowerCAmelCase_ , solver_type=lowerCAmelCase_ , ) def lowercase ( self : List[Any] ) -> Optional[int]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCAmelCase_ ) def lowercase ( self : Optional[Any] ) -> Optional[int]: for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=lowerCAmelCase_ , solver_type=lowerCAmelCase_ , prediction_type=lowerCAmelCase_ , ) __lowerCAmelCase = self.full_loop( solver_order=lowerCAmelCase_ , solver_type=lowerCAmelCase_ , prediction_type=lowerCAmelCase_ , ) assert not torch.isnan(lowerCAmelCase_ ).any(), "Samples have nan numbers" def lowercase ( self : Optional[Any] ) -> Union[str, Any]: self.check_over_configs(lower_order_final=lowerCAmelCase_ ) self.check_over_configs(lower_order_final=lowerCAmelCase_ ) def lowercase ( self : int ) -> Optional[int]: for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_forward(num_inference_steps=lowerCAmelCase_ , time_step=0 ) def lowercase ( self : List[Any] ) -> int: __lowerCAmelCase = self.full_loop() __lowerCAmelCase = torch.mean(torch.abs(lowerCAmelCase_ ) ) assert abs(result_mean.item() - 0.24_64 ) < 1e-3 def lowercase ( self : int ) -> List[str]: __lowerCAmelCase = self.full_loop(prediction_type='v_prediction' ) __lowerCAmelCase = torch.mean(torch.abs(lowerCAmelCase_ ) ) assert abs(result_mean.item() - 0.10_14 ) < 1e-3 def lowercase ( self : Optional[Any] ) -> Any: __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config(thresholding=lowerCAmelCase_ , dynamic_thresholding_ratio=0 ) __lowerCAmelCase = scheduler_class(**lowerCAmelCase_ ) __lowerCAmelCase = 1_0 __lowerCAmelCase = self.dummy_model() __lowerCAmelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(lowerCAmelCase_ ) for i, t in enumerate(scheduler.timesteps ): __lowerCAmelCase = model(lowerCAmelCase_ , lowerCAmelCase_ ) __lowerCAmelCase = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).prev_sample assert sample.dtype == torch.floataa def lowercase ( self : Union[str, Any] , **lowerCAmelCase_ : Tuple ) -> str: for scheduler_class in self.scheduler_classes: __lowerCAmelCase = self.get_scheduler_config(**lowerCAmelCase_ ) __lowerCAmelCase = scheduler_class(**lowerCAmelCase_ ) scheduler.set_timesteps(scheduler.config.num_train_timesteps ) assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
354
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _snake_case : Optional[Any] = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : int = ['FNetTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : Tuple = ['FNetTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : List[str] = [ 'FNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'FNetForMaskedLM', 'FNetForMultipleChoice', 'FNetForNextSentencePrediction', 'FNetForPreTraining', 'FNetForQuestionAnswering', 'FNetForSequenceClassification', 'FNetForTokenClassification', 'FNetLayer', 'FNetModel', 'FNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys _snake_case : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
207
0
'''simple docstring''' import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def _SCREAMING_SNAKE_CASE (A ) -> Tuple: """simple docstring""" lowercase__ = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: lowercase__ = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: lowercase__ = 4 lowercase__ = 48 lowercase__ = '''pixelshuffle_aux''' elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: lowercase__ = [6, 6, 6, 6] lowercase__ = 60 lowercase__ = [6, 6, 6, 6] lowercase__ = '''pixelshuffledirect''' elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: lowercase__ = 4 lowercase__ = '''nearest+conv''' elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: lowercase__ = 1 lowercase__ = 1 lowercase__ = 126 lowercase__ = 7 lowercase__ = 255.0 lowercase__ = '''''' return config def _SCREAMING_SNAKE_CASE (A , A ) -> Dict: """simple docstring""" if "patch_embed.proj" in name and "layers" not in name: lowercase__ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: lowercase__ = name.replace('''patch_embed.norm''' , '''embeddings.patch_embeddings.layernorm''' ) if "layers" in name: lowercase__ = name.replace('''layers''' , '''encoder.stages''' ) if "residual_group.blocks" in name: lowercase__ = name.replace('''residual_group.blocks''' , '''layers''' ) if "attn.proj" in name: lowercase__ = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: lowercase__ = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: lowercase__ = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: lowercase__ = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: lowercase__ = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: lowercase__ = name.replace('''mlp.fc2''' , '''output.dense''' ) if "q_bias" in name: lowercase__ = name.replace('''q_bias''' , '''query.bias''' ) if "k_bias" in name: lowercase__ = name.replace('''k_bias''' , '''key.bias''' ) if "v_bias" in name: lowercase__ = name.replace('''v_bias''' , '''value.bias''' ) if "cpb_mlp" in name: lowercase__ = name.replace('''cpb_mlp''' , '''continuous_position_bias_mlp''' ) if "patch_embed.proj" in name: lowercase__ = name.replace('''patch_embed.proj''' , '''patch_embed.projection''' ) if name == "norm.weight": lowercase__ = '''layernorm.weight''' if name == "norm.bias": lowercase__ = '''layernorm.bias''' if "conv_first" in name: lowercase__ = name.replace('''conv_first''' , '''first_convolution''' ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: lowercase__ = name.replace('''conv_last''' , '''final_convolution''' ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: lowercase__ = name.replace('''conv_before_upsample.0''' , '''conv_before_upsample''' ) if "upsample.0" in name: lowercase__ = name.replace('''upsample.0''' , '''upsample.convolution_0''' ) if "upsample.2" in name: lowercase__ = name.replace('''upsample.2''' , '''upsample.convolution_1''' ) lowercase__ = '''upsample.''' + name elif config.upsampler == "pixelshuffledirect": lowercase__ = name.replace('''upsample.0.weight''' , '''upsample.conv.weight''' ) lowercase__ = name.replace('''upsample.0.bias''' , '''upsample.conv.bias''' ) else: pass else: lowercase__ = '''swin2sr.''' + name return name def _SCREAMING_SNAKE_CASE (A , A ) -> Dict: """simple docstring""" for key in orig_state_dict.copy().keys(): lowercase__ = orig_state_dict.pop(A ) if "qkv" in key: lowercase__ = key.split('''.''' ) lowercase__ = int(key_split[1] ) lowercase__ = int(key_split[4] ) lowercase__ = config.embed_dim if "weight" in key: lowercase__ = val[:dim, :] lowercase__ = val[dim : dim * 2, :] lowercase__ = val[-dim:, :] else: lowercase__ = val[:dim] lowercase__ = val[dim : dim * 2] lowercase__ = val[-dim:] pass else: lowercase__ = val return orig_state_dict def _SCREAMING_SNAKE_CASE (A , A , A ) -> Tuple: """simple docstring""" lowercase__ = get_config(A ) lowercase__ = SwinaSRForImageSuperResolution(A ) model.eval() lowercase__ = torch.hub.load_state_dict_from_url(A , map_location='''cpu''' ) lowercase__ = convert_state_dict(A , A ) lowercase__ ,lowercase__ = model.load_state_dict(A , strict=A ) if len(A ) > 0: raise ValueError('''Missing keys when converting: {}'''.format(A ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(f"Unexpected key {key} in state_dict" ) # verify values lowercase__ = '''https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true''' lowercase__ = Image.open(requests.get(A , stream=A ).raw ).convert('''RGB''' ) lowercase__ = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values lowercase__ = 126 if '''Jpeg''' in checkpoint_url else 256 lowercase__ = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) lowercase__ = transforms(A ).unsqueeze(0 ) if config.num_channels == 1: lowercase__ = pixel_values[:, 0, :, :].unsqueeze(1 ) lowercase__ = model(A ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: lowercase__ = torch.Size([1, 3, 512, 512] ) lowercase__ = torch.tensor( [[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: lowercase__ = torch.Size([1, 3, 1_024, 1_024] ) lowercase__ = torch.tensor( [[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here lowercase__ = torch.Size([1, 3, 1_024, 1_024] ) lowercase__ = torch.tensor( [[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: lowercase__ = torch.Size([1, 3, 512, 512] ) lowercase__ = torch.tensor( [[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: lowercase__ = torch.Size([1, 3, 1_024, 1_024] ) lowercase__ = torch.tensor( [[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] ) assert ( outputs.reconstruction.shape == expected_shape ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}" assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , A , atol=1E-3 ) print('''Looks ok!''' ) lowercase__ = { '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''': ( '''swin2SR-classical-sr-x2-64''' ), '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth''': ( '''swin2SR-classical-sr-x4-64''' ), '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth''': ( '''swin2SR-compressed-sr-x4-48''' ), '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth''': ( '''swin2SR-lightweight-x2-64''' ), '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth''': ( '''swin2SR-realworld-sr-x4-64-bsrgan-psnr''' ), } lowercase__ = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(f"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(A ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) processor.save_pretrained(A ) if push_to_hub: model.push_to_hub(f"caidas/{model_name}" ) processor.push_to_hub(f"caidas/{model_name}" ) if __name__ == "__main__": lowerCamelCase : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth', type=str, help='URL of the original Swin2SR checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the converted model to the hub.') lowerCamelCase : Dict = parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
2
"""simple docstring""" from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging UpperCamelCase : Any = logging.get_logger(__name__) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = ["pixel_values"] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = 8 , **__UpperCAmelCase , ): '''simple docstring''' super().__init__(**__UpperCAmelCase ) __UpperCamelCase = do_rescale __UpperCamelCase = rescale_factor __UpperCamelCase = do_pad __UpperCamelCase = pad_size def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase ): '''simple docstring''' return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase = get_image_size(__UpperCAmelCase ) __UpperCamelCase = (old_height // size + 1) * size - old_height __UpperCamelCase = (old_width // size + 1) * size - old_width return pad(__UpperCAmelCase , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale __UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCamelCase = do_pad if do_pad is not None else self.do_pad __UpperCamelCase = pad_size if pad_size is not None else self.pad_size __UpperCamelCase = make_list_of_images(__UpperCAmelCase ) if not valid_images(__UpperCAmelCase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) # All transformations expect numpy arrays. __UpperCamelCase = [to_numpy_array(__UpperCAmelCase ) for image in images] if do_rescale: __UpperCamelCase = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images] if do_pad: __UpperCamelCase = [self.pad(__UpperCAmelCase , size=__UpperCAmelCase ) for image in images] __UpperCamelCase = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images] __UpperCamelCase = {'pixel_values': images} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
316
0
"""simple docstring""" from graphs.minimum_spanning_tree_kruskal import kruskal def lowercase_ ( ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase : str = 9 __lowerCamelCase : Dict = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] __lowerCamelCase : Tuple = kruskal(_lowerCamelCase , _lowerCamelCase ) __lowerCamelCase : List[str] = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] assert sorted(_lowerCamelCase ) == sorted(_lowerCamelCase )
362
"""simple docstring""" def lowercase_ ( _lowerCamelCase: int = 4000000 ) -> int: '''simple docstring''' __lowerCamelCase : Tuple = [0, 1] __lowerCamelCase : Union[str, Any] = 0 while fib[i] <= n: fib.append(fib[i] + fib[i + 1] ) if fib[i + 2] > n: break i += 1 __lowerCamelCase : Tuple = 0 for j in range(len(_lowerCamelCase ) - 1 ): if fib[j] % 2 == 0: total += fib[j] return total if __name__ == "__main__": print(F"""{solution() = }""")
64
0
"""simple docstring""" import math from typing import Callable, List, Optional, Union import numpy as np import PIL import torch from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler def _A ( lowercase , lowercase , lowercase=[] ): """simple docstring""" a =size[0] - overlap_pixels * 2 a =size[1] - overlap_pixels * 2 for letter in ["l", "r"]: if letter in remove_borders: size_x += overlap_pixels for letter in ["t", "b"]: if letter in remove_borders: size_y += overlap_pixels a =np.ones((size_y, size_x) , dtype=np.uinta ) * 2_55 a =np.pad(lowercase , mode='''linear_ramp''' , pad_width=lowercase , end_values=0 ) if "l" in remove_borders: a =mask[:, overlap_pixels : mask.shape[1]] if "r" in remove_borders: a =mask[:, 0 : mask.shape[1] - overlap_pixels] if "t" in remove_borders: a =mask[overlap_pixels : mask.shape[0], :] if "b" in remove_borders: a =mask[0 : mask.shape[0] - overlap_pixels, :] return mask def _A ( lowercase , lowercase , lowercase ): """simple docstring""" return max(lowercase , min(lowercase , lowercase ) ) def _A ( lowercase , lowercase , lowercase ): """simple docstring""" return ( clamp(rect[0] , min[0] , max[0] ), clamp(rect[1] , min[1] , max[1] ), clamp(rect[2] , min[0] , max[0] ), clamp(rect[3] , min[1] , max[1] ), ) def _A ( lowercase , lowercase , lowercase ): """simple docstring""" a =list(lowercase ) rect[0] -= overlap rect[1] -= overlap rect[2] += overlap rect[3] += overlap a =clamp_rect(lowercase , [0, 0] , [image_size[0], image_size[1]] ) return rect def _A ( lowercase , lowercase , lowercase , lowercase ): """simple docstring""" a =Image.new('''RGB''' , (tile.size[0] + original_slice, tile.size[1]) ) result.paste( original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop( (slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , ) result.paste(lowercase , (original_slice, 0) ) return result def _A ( lowercase , lowercase ): """simple docstring""" a =(original_image_slice * 4, 0, tile.size[0], tile.size[1]) a =tile.crop(lowercase ) return tile def _A ( lowercase , lowercase ): """simple docstring""" a =n % d return n - divisor class __A ( _SCREAMING_SNAKE_CASE ): """simple docstring""" def __init__( self , __A , __A , __A , __A , __A , __A , __A = 350 , ) -> int: super().__init__( vae=__A , text_encoder=__A , tokenizer=__A , unet=__A , low_res_scheduler=__A , scheduler=__A , max_noise_level=__A , ) def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , __A , __A , __A , **__A ) -> Tuple: torch.manual_seed(0 ) a =( min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ), min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ), min(image.size[0] , (x + 1) * tile_size ), min(image.size[1] , (y + 1) * tile_size ), ) a =add_overlap_rect(__A , __A , image.size ) a =image.crop(__A ) a =((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0] a =translated_slice_x - (original_image_slice / 2) a =max(0 , __A ) a =squeeze_tile(__A , __A , __A , __A ) a =to_input.size a =to_input.resize((tile_size, tile_size) , Image.BICUBIC ) a =super(__A , self ).__call__(image=__A , **__A ).images[0] a =upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC ) a =unsqueeze_tile(__A , __A ) a =upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC ) a =[] if x == 0: remove_borders.append('''l''' ) elif crop_rect[2] == image.size[0]: remove_borders.append('''r''' ) if y == 0: remove_borders.append('''t''' ) elif crop_rect[3] == image.size[1]: remove_borders.append('''b''' ) a =Image.fromarray( make_transparency_mask( (upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=__A ) , mode='''L''' , ) final_image.paste( __A , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , __A ) @torch.no_grad() def __call__( self , __A , __A , __A = 75 , __A = 9.0 , __A = 50 , __A = None , __A = 1 , __A = 0.0 , __A = None , __A = None , __A = None , __A = 1 , __A = 128 , __A = 32 , __A = 32 , ) -> Optional[int]: a =Image.new('''RGB''' , (image.size[0] * 4, image.size[1] * 4) ) a =math.ceil(image.size[0] / tile_size ) a =math.ceil(image.size[1] / tile_size ) a =tcx * tcy a =0 for y in range(__A ): for x in range(__A ): self._process_tile( __A , __A , __A , __A , __A , __A , __A , prompt=__A , num_inference_steps=__A , guidance_scale=__A , noise_level=__A , negative_prompt=__A , num_images_per_prompt=__A , eta=__A , generator=__A , latents=__A , ) current_count += 1 if callback is not None: callback({'''progress''': current_count / total_tile_count, '''image''': final_image} ) return final_image def _A ( ): """simple docstring""" # Run a demo a ='''stabilityai/stable-diffusion-x4-upscaler''' a =StableDiffusionTiledUpscalePipeline.from_pretrained(lowercase , revision='''fp16''' , torch_dtype=torch.floataa ) a =pipe.to('''cuda''' ) a =Image.open('''../../docs/source/imgs/diffusers_library.jpg''' ) def callback(lowercase ): print(f'''progress: {obj["progress"]:.4f}''' ) obj["image"].save('''diffusers_library_progress.jpg''' ) a =pipe(image=lowercase , prompt='''Black font, white background, vector''' , noise_level=40 , callback=lowercase ) final_image.save('''diffusers_library.jpg''' ) if __name__ == "__main__": main()
81
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification def _A ( lowercase ): """simple docstring""" a =SwinvaConfig() a =swinva_name.split('''_''' ) a =name_split[1] if "to" in name_split[3]: a =int(name_split[3][-3:] ) else: a =int(name_split[3] ) if "to" in name_split[2]: a =int(name_split[2][-2:] ) else: a =int(name_split[2][6:] ) if model_size == "tiny": a =96 a =(2, 2, 6, 2) a =(3, 6, 12, 24) elif model_size == "small": a =96 a =(2, 2, 18, 2) a =(3, 6, 12, 24) elif model_size == "base": a =1_28 a =(2, 2, 18, 2) a =(4, 8, 16, 32) else: a =1_92 a =(2, 2, 18, 2) a =(6, 12, 24, 48) if "to" in swinva_name: a =(12, 12, 12, 6) if ("22k" in swinva_name) and ("to" not in swinva_name): a =2_18_41 a ='''huggingface/label-files''' a ='''imagenet-22k-id2label.json''' a =json.load(open(hf_hub_download(lowercase , lowercase , repo_type='''dataset''' ) , '''r''' ) ) a ={int(lowercase ): v for k, v in idalabel.items()} a =idalabel a ={v: k for k, v in idalabel.items()} else: a =10_00 a ='''huggingface/label-files''' a ='''imagenet-1k-id2label.json''' a =json.load(open(hf_hub_download(lowercase , lowercase , repo_type='''dataset''' ) , '''r''' ) ) a ={int(lowercase ): v for k, v in idalabel.items()} a =idalabel a ={v: k for k, v in idalabel.items()} a =img_size a =num_classes a =embed_dim a =depths a =num_heads a =window_size return config def _A ( lowercase ): """simple docstring""" if "patch_embed.proj" in name: a =name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: a =name.replace('''patch_embed.norm''' , '''embeddings.norm''' ) if "layers" in name: a ='''encoder.''' + name if "attn.proj" in name: a =name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: a =name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: a =name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: a =name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: a =name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: a =name.replace('''mlp.fc2''' , '''output.dense''' ) if "q_bias" in name: a =name.replace('''q_bias''' , '''query.bias''' ) if "k_bias" in name: a =name.replace('''k_bias''' , '''key.bias''' ) if "v_bias" in name: a =name.replace('''v_bias''' , '''value.bias''' ) if "cpb_mlp" in name: a =name.replace('''cpb_mlp''' , '''continuous_position_bias_mlp''' ) if name == "norm.weight": a ='''layernorm.weight''' if name == "norm.bias": a ='''layernorm.bias''' if "head" in name: a =name.replace('''head''' , '''classifier''' ) else: a ='''swinv2.''' + name return name def _A ( lowercase , lowercase ): """simple docstring""" for key in orig_state_dict.copy().keys(): a =orig_state_dict.pop(lowercase ) if "mask" in key: continue elif "qkv" in key: a =key.split('''.''' ) a =int(key_split[1] ) a =int(key_split[3] ) a =model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: a =val[:dim, :] a =val[dim : dim * 2, :] a =val[-dim:, :] else: a =val[:dim] a =val[ dim : dim * 2 ] a =val[-dim:] else: a =val return orig_state_dict def _A ( lowercase , lowercase ): """simple docstring""" a =timm.create_model(lowercase , pretrained=lowercase ) timm_model.eval() a =get_swinva_config(lowercase ) a =SwinvaForImageClassification(lowercase ) model.eval() a =convert_state_dict(timm_model.state_dict() , lowercase ) model.load_state_dict(lowercase ) a ='''http://images.cocodataset.org/val2017/000000039769.jpg''' a =AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swinva_name.replace('''_''' , '''-''' ) ) ) a =Image.open(requests.get(lowercase , stream=lowercase ).raw ) a =image_processor(images=lowercase , return_tensors='''pt''' ) a =timm_model(inputs['''pixel_values'''] ) a =model(**lowercase ).logits assert torch.allclose(lowercase , lowercase , atol=1E-3 ) print(f'''Saving model {swinva_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowercase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(lowercase ) model.push_to_hub( repo_path_or_name=Path(lowercase , lowercase ) , organization='''nandwalritik''' , commit_message='''Add model''' , ) if __name__ == "__main__": lowerCamelCase_ : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swinv2_name""", default="""swinv2_tiny_patch4_window8_256""", type=str, help="""Name of the Swinv2 timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) lowerCamelCase_ : Union[str, Any] = parser.parse_args() convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
81
1
import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor A : List[Any] = logging.get_logger(__name__) class A ( UpperCAmelCase__ ): '''simple docstring''' def __init__(self : List[Any] , *_UpperCAmelCase : int , **_UpperCAmelCase : List[str] ) -> None: """simple docstring""" warnings.warn( """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use CLIPImageProcessor instead.""" , _UpperCAmelCase , ) super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
146
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : Optional[Any] = logging.get_logger(__name__) A : Tuple = { 'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json', 'YituTech/conv-bert-medium-small': ( 'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json' ), 'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json', # See all ConvBERT models at https://huggingface.co/models?filter=convbert } class A ( UpperCAmelCase__ ): '''simple docstring''' A__ = '''convbert''' def __init__(self : str , _UpperCAmelCase : Union[str, Any]=3_0522 , _UpperCAmelCase : Dict=768 , _UpperCAmelCase : Optional[int]=12 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : str=3072 , _UpperCAmelCase : Tuple="gelu" , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : str=512 , _UpperCAmelCase : Dict=2 , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Tuple=1E-1_2 , _UpperCAmelCase : Dict=1 , _UpperCAmelCase : Optional[Any]=0 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : str=768 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : Optional[Any]=9 , _UpperCAmelCase : List[Any]=1 , _UpperCAmelCase : Union[str, Any]=None , **_UpperCAmelCase : List[Any] , ) -> List[str]: """simple docstring""" super().__init__( pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , ) lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = embedding_size lowercase__ = head_ratio lowercase__ = conv_kernel_size lowercase__ = num_groups lowercase__ = classifier_dropout class A ( UpperCAmelCase__ ): '''simple docstring''' @property def lowerCamelCase__ (self : Any ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": lowercase__ = {0: """batch""", 1: """choice""", 2: """sequence"""} else: lowercase__ = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
146
1
"""simple docstring""" import unittest from transformers import AutoTokenizer, is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow if is_flax_available(): import jax.numpy as jnp from transformers import FlaxXLMRobertaModel @require_sentencepiece @require_tokenizers @require_flax class lowerCAmelCase_ (unittest.TestCase ): """simple docstring""" @slow def __magic_name__ (self ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = FlaxXLMRobertaModel.from_pretrained("""xlm-roberta-base""" ) SCREAMING_SNAKE_CASE__ : str = AutoTokenizer.from_pretrained("""xlm-roberta-base""" ) SCREAMING_SNAKE_CASE__ : int = """The dog is cute and lives in the garden house""" SCREAMING_SNAKE_CASE__ : Any = jnp.array([tokenizer.encode(SCREAMING_SNAKE_CASE__ )] ) SCREAMING_SNAKE_CASE__ : Dict = (1, 12, 7_68) # batch_size, sequence_length, embedding_vector_dim SCREAMING_SNAKE_CASE__ : Optional[int] = jnp.array( [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model(SCREAMING_SNAKE_CASE__ )["""last_hidden_state"""] self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ ) # compare the actual values for a slice of last dim self.assertTrue(jnp.allclose(output[:, :, -1] , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
25
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :List[str] = KandinskyInpaintPipeline _UpperCAmelCase :List[str] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image'] _UpperCAmelCase :Dict = [ 'prompt', 'negative_prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image', ] _UpperCAmelCase :Optional[int] = [ 'generator', 'height', 'width', 'latents', 'guidance_scale', 'negative_prompt', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] _UpperCAmelCase :int = False @property def __UpperCamelCase( self ): '''simple docstring''' return 32 @property def __UpperCamelCase( self ): '''simple docstring''' return 32 @property def __UpperCamelCase( self ): '''simple docstring''' return self.time_input_dim @property def __UpperCamelCase( self ): '''simple docstring''' return self.time_input_dim * 4 @property def __UpperCamelCase( self ): '''simple docstring''' return 100 @property def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" ) return tokenizer @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : Optional[int] = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) UpperCamelCase : Optional[int] = MultilingualCLIP(A_ ) UpperCamelCase : Union[str, Any] = text_encoder.eval() return text_encoder @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : Optional[int] = { "in_channels": 9, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "text_image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "text_image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } UpperCamelCase : List[Any] = UNetaDConditionModel(**A_ ) return model @property def __UpperCamelCase( self ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : List[str] = VQModel(**self.dummy_movq_kwargs ) return model def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = self.dummy_text_encoder UpperCamelCase : str = self.dummy_tokenizer UpperCamelCase : List[Any] = self.dummy_unet UpperCamelCase : Optional[Any] = self.dummy_movq UpperCamelCase : Union[str, Any] = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=A_ , set_alpha_to_one=A_ , steps_offset=1 , prediction_type="epsilon" , thresholding=A_ , ) UpperCamelCase : Optional[Any] = { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __UpperCamelCase( self , A_ , A_=0 ): '''simple docstring''' UpperCamelCase : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(A_ ) ).to(A_ ) UpperCamelCase : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(A_ ) # create init_image UpperCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ ) UpperCamelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCamelCase : List[Any] = Image.fromarray(np.uinta(A_ ) ).convert("RGB" ).resize((256, 256) ) # create mask UpperCamelCase : str = np.ones((64, 64) , dtype=np.floataa ) UpperCamelCase : str = 0 if str(A_ ).startswith("mps" ): UpperCamelCase : int = torch.manual_seed(A_ ) else: UpperCamelCase : Tuple = torch.Generator(device=A_ ).manual_seed(A_ ) UpperCamelCase : Union[str, Any] = { "prompt": "horse", "image": init_image, "mask_image": mask, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 2, "guidance_scale": 4.0, "output_type": "np", } return inputs def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = "cpu" UpperCamelCase : Tuple = self.get_dummy_components() UpperCamelCase : str = self.pipeline_class(**A_ ) UpperCamelCase : Tuple = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase : Any = pipe(**self.get_dummy_inputs(A_ ) ) UpperCamelCase : List[Any] = output.images UpperCamelCase : List[Any] = pipe( **self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0] UpperCamelCase : List[Any] = image[0, -3:, -3:, -1] UpperCamelCase : Any = image_from_tuple[0, -3:, -3:, -1] print(F"""image.shape {image.shape}""" ) assert image.shape == (1, 64, 64, 3) UpperCamelCase : Union[str, Any] = np.array( [0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" def __UpperCamelCase( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" ) UpperCamelCase : List[str] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) UpperCamelCase : Dict = np.ones((768, 768) , dtype=np.floataa ) UpperCamelCase : str = 0 UpperCamelCase : List[Any] = "a hat" UpperCamelCase : Tuple = KandinskyPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa ) pipe_prior.to(A_ ) UpperCamelCase : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa ) UpperCamelCase : Optional[Any] = pipeline.to(A_ ) pipeline.set_progress_bar_config(disable=A_ ) UpperCamelCase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCamelCase , UpperCamelCase : Optional[Any] = pipe_prior( A_ , generator=A_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple() UpperCamelCase : Dict = pipeline( A_ , image=A_ , mask_image=A_ , image_embeds=A_ , negative_image_embeds=A_ , generator=A_ , num_inference_steps=100 , height=768 , width=768 , output_type="np" , ) UpperCamelCase : List[str] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(A_ , A_ )
52
0
import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class _A ( unittest.TestCase ): def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' __a = Vector([1, 2, 3]) self.assertEqual(x.component(0) , 1) self.assertEqual(x.component(2) , 3) __a = Vector() def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = Vector([0, 0, 0, 0, 0, 1]) self.assertEqual(str(__SCREAMING_SNAKE_CASE) , '''(0,0,0,0,0,1)''') def _lowerCamelCase ( self : int): '''simple docstring''' __a = Vector([1, 2, 3, 4]) self.assertEqual(len(__SCREAMING_SNAKE_CASE) , 4) def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = Vector([1, 2]) __a = Vector([1, 2, 3, 4, 5]) __a = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) __a = Vector([1, -1, 1, -1, 2, -3, 4, -5]) self.assertAlmostEqual(x.euclidean_length() , 2.2_36 , 3) self.assertAlmostEqual(y.euclidean_length() , 7.4_16 , 3) self.assertEqual(z.euclidean_length() , 0) self.assertAlmostEqual(w.euclidean_length() , 7.6_16 , 3) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = Vector([1, 2, 3]) __a = Vector([1, 1, 1]) self.assertEqual((x + y).component(0) , 2) self.assertEqual((x + y).component(1) , 3) self.assertEqual((x + y).component(2) , 4) def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = Vector([1, 2, 3]) __a = Vector([1, 1, 1]) self.assertEqual((x - y).component(0) , 0) self.assertEqual((x - y).component(1) , 1) self.assertEqual((x - y).component(2) , 2) def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = Vector([1, 2, 3]) __a = Vector([2, -1, 4]) # for test of dot product __a = Vector([1, -2, -1]) self.assertEqual(str(x * 3.0) , '''(3.0,6.0,9.0)''') self.assertEqual((a * b) , 0) def _lowerCamelCase ( self : int): '''simple docstring''' self.assertEqual(str(zero_vector(10)).count('''0''') , 10) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' self.assertEqual(str(unit_basis_vector(3 , 1)) , '''(0,1,0)''') def _lowerCamelCase ( self : Tuple): '''simple docstring''' __a = Vector([1, 2, 3]) __a = Vector([1, 0, 1]) self.assertEqual(str(axpy(2 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)) , '''(3,4,7)''') def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = Vector([1, 0, 0, 0, 0, 0]) __a = x.copy() self.assertEqual(str(__SCREAMING_SNAKE_CASE) , str(__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = Vector([1, 0, 0]) x.change_component(0 , 0) x.change_component(1 , 1) self.assertEqual(str(__SCREAMING_SNAKE_CASE) , '''(0,1,0)''') def _lowerCamelCase ( self : str): '''simple docstring''' __a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) self.assertEqual('''|1,2,3|\n|2,4,5|\n|6,7,8|\n''' , str(__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) __a = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height()): for y in range(a.width()): self.assertEqual(minors[x][y] , a.minor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) __a = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height()): for y in range(a.width()): self.assertEqual(cofactors[x][y] , a.cofactor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : str): '''simple docstring''' __a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) self.assertEqual(-5 , a.determinant()) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3) __a = Vector([1, 2, 3]) self.assertEqual('''(14,32,50)''' , str(a * x)) self.assertEqual('''|2,4,6|\n|8,10,12|\n|14,16,18|\n''' , str(a * 2)) def _lowerCamelCase ( self : str): '''simple docstring''' __a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) a.change_component(0 , 2 , 5) self.assertEqual('''|1,2,5|\n|2,4,5|\n|6,7,8|\n''' , str(__SCREAMING_SNAKE_CASE)) def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' __a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) self.assertEqual(7 , a.component(2 , 1) , 0.01) def _lowerCamelCase ( self : List[str]): '''simple docstring''' __a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) __a = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3) self.assertEqual('''|2,4,10|\n|4,8,10|\n|12,14,18|\n''' , str(a + b)) def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) __a = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3) self.assertEqual('''|0,0,-4|\n|0,0,0|\n|0,0,-2|\n''' , str(a - b)) def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' self.assertEqual( '''|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n''' , str(square_zero_matrix(5)) , ) if __name__ == "__main__": unittest.main()
365
# Lint as: python3 import itertools import os import re __snake_case :int = re.compile(r'''([A-Z]+)([A-Z][a-z])''') __snake_case :List[Any] = re.compile(r'''([a-z\d])([A-Z])''') __snake_case :Dict = re.compile(r'''(?<!_)_(?!_)''') __snake_case :Optional[int] = re.compile(r'''(_{2,})''') __snake_case :Optional[Any] = r'''^\w+(\.\w+)*$''' __snake_case :Union[str, Any] = r'''<>:/\|?*''' def __snake_case ( _UpperCAmelCase ): __a = _uppercase_uppercase_re.sub(R'''\1_\2''' , _UpperCAmelCase ) __a = _lowercase_uppercase_re.sub(R'''\1_\2''' , _UpperCAmelCase ) return name.lower() def __snake_case ( _UpperCAmelCase ): __a = _single_underscore_re.split(_UpperCAmelCase ) __a = [_multiple_underscores_re.split(_UpperCAmelCase ) for n in name] return "".join(n.capitalize() for n in itertools.chain.from_iterable(_UpperCAmelCase ) if n != '''''' ) def __snake_case ( _UpperCAmelCase ): if os.path.basename(_UpperCAmelCase ) != name: raise ValueError(f'Should be a dataset name, not a path: {name}' ) return camelcase_to_snakecase(_UpperCAmelCase ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): if os.path.basename(_UpperCAmelCase ) != name: raise ValueError(f'Should be a dataset name, not a path: {name}' ) if not re.match(_split_re , _UpperCAmelCase ): raise ValueError(f'Split name should match \'{_split_re}\'\' but got \'{split}\'.' ) return f'{filename_prefix_for_name(_UpperCAmelCase )}-{split}' def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None ): __a = filename_prefix_for_split(_UpperCAmelCase , _UpperCAmelCase ) if filetype_suffix: prefix += f'.{filetype_suffix}' __a = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) return f'{filepath}*' def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None ): __a = filename_prefix_for_split(_UpperCAmelCase , _UpperCAmelCase ) __a = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) if shard_lengths: __a = len(_UpperCAmelCase ) __a = [f'{prefix}-{shard_id:05d}-of-{num_shards:05d}' for shard_id in range(_UpperCAmelCase )] if filetype_suffix: __a = [filename + f'.{filetype_suffix}' for filename in filenames] return filenames else: __a = prefix if filetype_suffix: filename += f'.{filetype_suffix}' return [filename]
131
0
'''simple docstring''' import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class a__( enum.Enum ): '''simple docstring''' UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : Dict = 1 UpperCAmelCase_ : Any = 2 @add_end_docstrings(lowerCAmelCase__ ) class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : int = ''' In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos> ''' def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" super().__init__(*__lowerCAmelCase , **__lowerCAmelCase) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. lowerCAmelCase = None if self.model.config.prefix is not None: lowerCAmelCase = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. lowerCAmelCase = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._sanitize_parameters(prefix=__lowerCAmelCase , **self._forward_params) lowerCAmelCase = {**self._preprocess_params, **preprocess_params} lowerCAmelCase = {**self._forward_params, **forward_params} def a_ ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase , ): """simple docstring""" lowerCAmelCase = {} if prefix is not None: lowerCAmelCase = prefix if prefix: lowerCAmelCase = self.tokenizer( __lowerCAmelCase , padding=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=self.framework) lowerCAmelCase = prefix_inputs["""input_ids"""].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected" """ [None, 'hole']""") lowerCAmelCase = handle_long_generation preprocess_params.update(__lowerCAmelCase) lowerCAmelCase = generate_kwargs lowerCAmelCase = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError("""`return_text` is mutually exclusive with `return_full_text`""") if return_tensors is not None: raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""") lowerCAmelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""") lowerCAmelCase = ReturnType.TENSORS if return_type is not None: lowerCAmelCase = return_type if clean_up_tokenization_spaces is not None: lowerCAmelCase = clean_up_tokenization_spaces if stop_sequence is not None: lowerCAmelCase = self.tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase) if len(__lowerCAmelCase) > 1: warnings.warn( """Stopping on a multiple token sequence is not yet supported on transformers. The first token of""" """ the stop sequence will be used as the stop sequence string in the interim.""") lowerCAmelCase = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({"""add_space_before_punct_symbol""": True}) return super()._parse_and_tokenize(*__lowerCAmelCase , **__lowerCAmelCase) def __call__( self , __lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" return super().__call__(__lowerCAmelCase , **__lowerCAmelCase) def a_ ( self , __lowerCAmelCase , __lowerCAmelCase="" , __lowerCAmelCase=None , **__lowerCAmelCase): """simple docstring""" lowerCAmelCase = self.tokenizer( prefix + prompt_text , padding=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=self.framework) lowerCAmelCase = prompt_text if handle_long_generation == "hole": lowerCAmelCase = inputs["""input_ids"""].shape[-1] if "max_new_tokens" in generate_kwargs: lowerCAmelCase = generate_kwargs["""max_new_tokens"""] else: lowerCAmelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length) - cur_len if new_tokens < 0: raise ValueError("""We cannot infer how many new tokens are expected""") if cur_len + new_tokens > self.tokenizer.model_max_length: lowerCAmelCase = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( """We cannot use `hole` to handle this generation the number of desired tokens exceeds the""" """ models max length""") lowerCAmelCase = inputs["""input_ids"""][:, -keep_length:] if "attention_mask" in inputs: lowerCAmelCase = inputs["""attention_mask"""][:, -keep_length:] return inputs def a_ ( self , __lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" lowerCAmelCase = model_inputs["""input_ids"""] lowerCAmelCase = model_inputs.get("""attention_mask""" , __lowerCAmelCase) # Allow empty prompts if input_ids.shape[1] == 0: lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = 1 else: lowerCAmelCase = input_ids.shape[0] lowerCAmelCase = model_inputs.pop("""prompt_text""") # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. lowerCAmelCase = generate_kwargs.pop("""prefix_length""" , 0) if prefix_length > 0: lowerCAmelCase = """max_new_tokens""" in generate_kwargs or ( """generation_config""" in generate_kwargs and generate_kwargs["""generation_config"""].max_new_tokens is not None ) if not has_max_new_tokens: lowerCAmelCase = generate_kwargs.get("""max_length""") or self.model.config.max_length generate_kwargs["max_length"] += prefix_length lowerCAmelCase = """min_new_tokens""" in generate_kwargs or ( """generation_config""" in generate_kwargs and generate_kwargs["""generation_config"""].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL lowerCAmelCase = self.model.generate(input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase , **__lowerCAmelCase) lowerCAmelCase = generated_sequence.shape[0] if self.framework == "pt": lowerCAmelCase = generated_sequence.reshape(__lowerCAmelCase , out_b // in_b , *generated_sequence.shape[1:]) elif self.framework == "tf": lowerCAmelCase = tf.reshape(__lowerCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:])) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=ReturnType.FULL_TEXT , __lowerCAmelCase=True): """simple docstring""" lowerCAmelCase = model_outputs["""generated_sequence"""][0] lowerCAmelCase = model_outputs["""input_ids"""] lowerCAmelCase = model_outputs["""prompt_text"""] lowerCAmelCase = generated_sequence.numpy().tolist() lowerCAmelCase = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: lowerCAmelCase = {"""generated_token_ids""": sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text lowerCAmelCase = self.tokenizer.decode( __lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: lowerCAmelCase = 0 else: lowerCAmelCase = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , )) if return_type == ReturnType.FULL_TEXT: lowerCAmelCase = prompt_text + text[prompt_length:] else: lowerCAmelCase = text[prompt_length:] lowerCAmelCase = {"""generated_text""": all_text} records.append(__lowerCAmelCase) return records
272
'''simple docstring''' from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : str = '''EncodecFeatureExtractor''' UpperCAmelCase_ : Dict = ('''T5Tokenizer''', '''T5TokenizerFast''') def __init__( self , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" super().__init__(__lowerCAmelCase , __lowerCAmelCase) lowerCAmelCase = self.feature_extractor lowerCAmelCase = False def a_ ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=True): """simple docstring""" return self.tokenizer.get_decoder_prompt_ids(task=__lowerCAmelCase , language=__lowerCAmelCase , no_timestamps=__lowerCAmelCase) def __call__( self , *__lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*__lowerCAmelCase , **__lowerCAmelCase) lowerCAmelCase = kwargs.pop("""audio""" , __lowerCAmelCase) lowerCAmelCase = kwargs.pop("""sampling_rate""" , __lowerCAmelCase) lowerCAmelCase = kwargs.pop("""text""" , __lowerCAmelCase) if len(__lowerCAmelCase) > 0: lowerCAmelCase = args[0] lowerCAmelCase = args[1:] if audio is None and text is None: raise ValueError("""You need to specify either an `audio` or `text` input to process.""") if text is not None: lowerCAmelCase = self.tokenizer(__lowerCAmelCase , **__lowerCAmelCase) if audio is not None: lowerCAmelCase = self.feature_extractor(__lowerCAmelCase , *__lowerCAmelCase , sampling_rate=__lowerCAmelCase , **__lowerCAmelCase) if audio is None: return inputs elif text is None: return audio_inputs else: lowerCAmelCase = audio_inputs["""input_values"""] if "padding_mask" in audio_inputs: lowerCAmelCase = audio_inputs["""padding_mask"""] return inputs def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" lowerCAmelCase = kwargs.pop("""audio""" , __lowerCAmelCase) lowerCAmelCase = kwargs.pop("""padding_mask""" , __lowerCAmelCase) if len(__lowerCAmelCase) > 0: lowerCAmelCase = args[0] lowerCAmelCase = args[1:] if audio_values is not None: return self._decode_audio(__lowerCAmelCase , padding_mask=__lowerCAmelCase) else: return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase) def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase) def a_ ( self , __lowerCAmelCase , __lowerCAmelCase = None): """simple docstring""" lowerCAmelCase = to_numpy(__lowerCAmelCase) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = audio_values.shape if padding_mask is None: return list(__lowerCAmelCase) lowerCAmelCase = to_numpy(__lowerCAmelCase) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) lowerCAmelCase = seq_len - padding_mask.shape[-1] lowerCAmelCase = 1 - self.feature_extractor.padding_value lowerCAmelCase = np.pad(__lowerCAmelCase , ((0, 0), (0, difference)) , """constant""" , constant_values=__lowerCAmelCase) lowerCAmelCase = audio_values.tolist() for i in range(__lowerCAmelCase): lowerCAmelCase = np.asarray(audio_values[i])[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] lowerCAmelCase = sliced_audio.reshape(__lowerCAmelCase , -1) return audio_values
272
1
def _UpperCamelCase (a__ :str ): """simple docstring""" UpperCamelCase__ = 0 # if input_string is "aba" than new_input_string become "a|b|a" UpperCamelCase__ = """""" UpperCamelCase__ = """""" # append each character + "|" in new_string for range(0, length-1) for i in input_string[: len(a__ ) - 1]: new_input_string += i + "|" # append last character new_input_string += input_string[-1] # we will store the starting and ending of previous furthest ending palindromic # substring UpperCamelCase__ , UpperCamelCase__ = 0, 0 # length[i] shows the length of palindromic substring with center i UpperCamelCase__ = [1 for i in range(len(a__ ) )] # for each character in new_string find corresponding palindromic string UpperCamelCase__ = 0 for j in range(len(a__ ) ): UpperCamelCase__ = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 ) while ( j - k >= 0 and j + k < len(a__ ) and new_input_string[k + j] == new_input_string[j - k] ): k += 1 UpperCamelCase__ = 2 * k - 1 # does this string is ending after the previously explored end (that is r) ? # if yes the update the new r to the last index of this if j + k - 1 > r: UpperCamelCase__ = j - k + 1 # noqa: E741 UpperCamelCase__ = j + k - 1 # update max_length and start position if max_length < length[j]: UpperCamelCase__ = length[j] UpperCamelCase__ = j # create that string UpperCamelCase__ = new_input_string[start - max_length // 2 : start + max_length // 2 + 1] for i in s: if i != "|": output_string += i return output_string if __name__ == "__main__": import doctest doctest.testmod()
87
import shutil import tempfile import unittest import numpy as np import pytest from transformers import is_speech_available, is_vision_available from transformers.testing_utils import require_torch if is_vision_available(): from transformers import TvltImageProcessor if is_speech_available(): from transformers import TvltFeatureExtractor from transformers import TvltProcessor @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def _lowerCamelCase ( self ): UpperCamelCase__ = """ZinengTang/tvlt-base""" UpperCamelCase__ = tempfile.mkdtemp() def _lowerCamelCase ( self , **__lowerCAmelCase ): return TvltImageProcessor.from_pretrained(self.checkpoint , **__lowerCAmelCase ) def _lowerCamelCase ( self , **__lowerCAmelCase ): return TvltFeatureExtractor.from_pretrained(self.checkpoint , **__lowerCAmelCase ) def _lowerCamelCase ( self ): shutil.rmtree(self.tmpdirname ) def _lowerCamelCase ( self ): UpperCamelCase__ = self.get_image_processor() UpperCamelCase__ = self.get_feature_extractor() UpperCamelCase__ = TvltProcessor(image_processor=__lowerCAmelCase , feature_extractor=__lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) UpperCamelCase__ = TvltProcessor.from_pretrained(self.tmpdirname ) self.assertIsInstance(processor.feature_extractor , __lowerCAmelCase ) self.assertIsInstance(processor.image_processor , __lowerCAmelCase ) def _lowerCamelCase ( self ): UpperCamelCase__ = self.get_image_processor() UpperCamelCase__ = self.get_feature_extractor() UpperCamelCase__ = TvltProcessor(image_processor=__lowerCAmelCase , feature_extractor=__lowerCAmelCase ) UpperCamelCase__ = np.ones([12000] ) UpperCamelCase__ = feature_extractor(__lowerCAmelCase , return_tensors="""np""" ) UpperCamelCase__ = processor(audio=__lowerCAmelCase , return_tensors="""np""" ) for key in audio_dict.keys(): self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _lowerCamelCase ( self ): UpperCamelCase__ = self.get_image_processor() UpperCamelCase__ = self.get_feature_extractor() UpperCamelCase__ = TvltProcessor(image_processor=__lowerCAmelCase , feature_extractor=__lowerCAmelCase ) UpperCamelCase__ = np.ones([3, 224, 224] ) UpperCamelCase__ = image_processor(__lowerCAmelCase , return_tensors="""np""" ) UpperCamelCase__ = processor(images=__lowerCAmelCase , return_tensors="""np""" ) for key in image_dict.keys(): self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _lowerCamelCase ( self ): UpperCamelCase__ = self.get_image_processor() UpperCamelCase__ = self.get_feature_extractor() UpperCamelCase__ = TvltProcessor(image_processor=__lowerCAmelCase , feature_extractor=__lowerCAmelCase ) UpperCamelCase__ = np.ones([12000] ) UpperCamelCase__ = np.ones([3, 224, 224] ) UpperCamelCase__ = processor(audio=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["""audio_values""", """audio_mask""", """pixel_values""", """pixel_mask"""] ) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase ): processor() def _lowerCamelCase ( self ): UpperCamelCase__ = self.get_image_processor() UpperCamelCase__ = self.get_feature_extractor() UpperCamelCase__ = TvltProcessor(image_processor=__lowerCAmelCase , feature_extractor=__lowerCAmelCase ) self.assertListEqual( processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="""`processor` and `image_processor`+`feature_extractor` model input names do not match""" , )
87
1
import collections import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __A : Any = logging.get_logger(__name__) __A : int = '''▁''' __A : List[Any] = {'''vocab_file''': '''prophetnet.tokenizer'''} __A : int = { '''vocab_file''': { '''microsoft/xprophetnet-large-wiki100-cased''': ( '''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer''' ), } } __A : Union[str, Any] = { '''microsoft/xprophetnet-large-wiki100-cased''': {'''do_lower_case''': False}, } __A : Any = { '''microsoft/xprophetnet-large-wiki100-cased''': 512, } def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[Any]: '''simple docstring''' lowerCAmelCase : int = collections.OrderedDict() with open(_UpperCAmelCase, 'r', encoding='utf-8' ) as reader: lowerCAmelCase : Dict = reader.readlines() for index, token in enumerate(_UpperCAmelCase ): lowerCAmelCase : str = token.rstrip('\n' ) lowerCAmelCase : Optional[int] = index return vocab class __A ( lowerCAmelCase ): lowerCAmelCase_ : Any = VOCAB_FILES_NAMES lowerCAmelCase_ : Dict = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ : Union[str, Any] = ["input_ids", "attention_mask"] def __init__( self : Optional[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any]="[SEP]" , UpperCAmelCase_ : List[Any]="[SEP]" , UpperCAmelCase_ : List[Any]="[SEP]" , UpperCAmelCase_ : Optional[int]="[UNK]" , UpperCAmelCase_ : List[str]="[PAD]" , UpperCAmelCase_ : List[str]="[CLS]" , UpperCAmelCase_ : List[Any]="[MASK]" , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , **UpperCAmelCase_ : int , ): lowerCAmelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , ) try: import sentencepiece as spm except ImportError: logger.warning( 'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece' ' pip install sentencepiece' ) raise lowerCAmelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCAmelCase_ ) ) lowerCAmelCase : int = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # put special tokens and [unused] tokens into the vocab lowerCAmelCase : int = {'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[UNK]': 3, '[MASK]': 4} for i in range(10 ): lowerCAmelCase : str = f"[unused{i}]" lowerCAmelCase : Union[str, Any] = 5 + i # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab lowerCAmelCase : Any = 12 lowerCAmelCase : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()} for k in self.fairseq_tokens_to_ids.keys(): self.unique_no_split_tokens.append(UpperCAmelCase_ ) def __getstate__( self : Optional[int] ): lowerCAmelCase : Union[str, Any] = self.__dict__.copy() lowerCAmelCase : Any = None return state def __setstate__( self : Union[str, Any] , UpperCAmelCase_ : Dict ): lowerCAmelCase : Dict = d try: import sentencepiece as spm except ImportError: logger.warning( 'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece' ' pip install sentencepiece' ) raise # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): lowerCAmelCase : Any = {} lowerCAmelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowercase__ ( self : List[Any] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ ) if token_ids_a is None: return ([0] * len(UpperCAmelCase_ )) + [1] return ([0] * len(UpperCAmelCase_ )) + [1] + ([0] * len(UpperCAmelCase_ )) + [1] def lowercase__ ( self : str , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ): lowerCAmelCase : Optional[int] = [self.sep_token_id] if token_ids_a is None: return len(token_ids_a + sep ) * [0] return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowercase__ ( self : Tuple ): return len(self.sp_model ) + self.fairseq_offset def lowercase__ ( self : List[Any] ): lowerCAmelCase : Optional[Any] = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowercase__ ( self : Optional[Any] , UpperCAmelCase_ : str ): return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ ) def lowercase__ ( self : Dict , UpperCAmelCase_ : int ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowerCAmelCase : Dict = self.sp_model.PieceToId(UpperCAmelCase_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def lowercase__ ( self : List[str] , UpperCAmelCase_ : List[str] ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowercase__ ( self : int , UpperCAmelCase_ : Optional[int] ): lowerCAmelCase : Union[str, Any] = ''.join(UpperCAmelCase_ ).replace(UpperCAmelCase_ , ' ' ).strip() return out_string def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ): if not os.path.isdir(UpperCAmelCase_ ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return lowerCAmelCase : Dict = os.path.join( UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase_ , 'wb' ) as fi: lowerCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase_ ) return (out_vocab_file,) def lowercase__ ( self : Optional[Any] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ): if token_ids_a is None: return token_ids_a + [self.sep_token_id] lowerCAmelCase : Union[str, Any] = [self.sep_token_id] return token_ids_a + sep + token_ids_a + sep
138
import json import os import re import sys import urllib.request import requests from bsa import BeautifulSoup __A : Tuple = { '''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36''' ''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''' } def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase = "dhaka", _UpperCAmelCase = 5 ) -> int: '''simple docstring''' lowerCAmelCase : List[Any] = min(_UpperCAmelCase, 50 ) # Prevent abuse! lowerCAmelCase : str = { 'q': query, 'tbm': 'isch', 'hl': 'en', 'ijn': '0', } lowerCAmelCase : Optional[Any] = requests.get('https://www.google.com/search', params=_UpperCAmelCase, headers=_UpperCAmelCase ) lowerCAmelCase : int = BeautifulSoup(html.text, 'html.parser' ) lowerCAmelCase : List[Any] = ''.join( re.findall(r'AF_initDataCallback\(([^<]+)\);', str(soup.select('script' ) ) ) ) lowerCAmelCase : Optional[int] = json.dumps(_UpperCAmelCase ) lowerCAmelCase : str = json.loads(_UpperCAmelCase ) lowerCAmelCase : str = re.findall( r'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",', _UpperCAmelCase, ) if not matched_google_image_data: return 0 lowerCAmelCase : Tuple = re.sub( r'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]', '', str(_UpperCAmelCase ), ) lowerCAmelCase : Dict = re.findall( r'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]', _UpperCAmelCase, ) for index, fixed_full_res_image in enumerate(_UpperCAmelCase ): if index >= max_images: return index lowerCAmelCase : Any = bytes(_UpperCAmelCase, 'ascii' ).decode( 'unicode-escape' ) lowerCAmelCase : Tuple = bytes(_UpperCAmelCase, 'ascii' ).decode( 'unicode-escape' ) lowerCAmelCase : Optional[Any] = urllib.request.build_opener() lowerCAmelCase : Any = [ ( 'User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36' ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582', ) ] urllib.request.install_opener(_UpperCAmelCase ) lowerCAmelCase : List[str] = f"query_{query.replace(' ', '_' )}" if not os.path.exists(_UpperCAmelCase ): os.makedirs(_UpperCAmelCase ) urllib.request.urlretrieve( # noqa: S310 _UpperCAmelCase, f"{path_name}/original_size_img_{index}.jpg" ) return index if __name__ == "__main__": try: __A : Tuple = download_images_from_google_query(sys.argv[1]) print(F'{image_count} images were downloaded to disk.') except IndexError: print('''Please provide a search term.''') raise
138
1
"""simple docstring""" from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
318
"""simple docstring""" from collections import defaultdict def _A (__a , __a ) -> bool: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = first_str.lower().strip() SCREAMING_SNAKE_CASE_ : List[Any] = second_str.lower().strip() # Remove whitespace SCREAMING_SNAKE_CASE_ : Dict = first_str.replace(''' ''' , '''''' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = second_str.replace(''' ''' , '''''' ) # Strings of different lengths are not anagrams if len(__a ) != len(__a ): return False # Default values for count should be 0 SCREAMING_SNAKE_CASE_ : defaultdict[str, int] = defaultdict(__a ) # For each character in input strings, # increment count in the corresponding for i in range(len(__a ) ): count[first_str[i]] += 1 count[second_str[i]] -= 1 return all(_count == 0 for _count in count.values() ) if __name__ == "__main__": from doctest import testmod testmod() UpperCAmelCase_ : Any = input("""Enter the first string """).strip() UpperCAmelCase_ : Optional[int] = input("""Enter the second string """).strip() UpperCAmelCase_ : Union[str, Any] = check_anagrams(input_a, input_b) print(f'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
318
1
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCAmelCase_ ( unittest.TestCase ): def __init__( self : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int]=1_3 , UpperCAmelCase__ : Optional[Any]=3 , UpperCAmelCase__ : Optional[Any]=2_2_4 , UpperCAmelCase__ : Optional[Any]=3_0 , UpperCAmelCase__ : Any=4_0_0 , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Optional[Any]=[0.5, 0.5, 0.5] , UpperCAmelCase__ : Optional[int]=[0.5, 0.5, 0.5] , ) -> int: lowerCAmelCase = size if size is not None else {'height': 1_8, 'width': 1_8} lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = num_channels lowerCAmelCase = image_size lowerCAmelCase = min_resolution lowerCAmelCase = max_resolution lowerCAmelCase = do_resize lowerCAmelCase = size lowerCAmelCase = do_normalize lowerCAmelCase = image_mean lowerCAmelCase = image_std def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class UpperCAmelCase_ ( __lowercase , unittest.TestCase ): lowerCamelCase : Dict = ViTImageProcessor if is_vision_available() else None def __UpperCAmelCase ( self : List[str] ) -> List[Any]: lowerCAmelCase = EfficientFormerImageProcessorTester(self ) @property def __UpperCAmelCase ( self : List[Any] ) -> str: return self.image_proc_tester.prepare_image_processor_dict() def __UpperCAmelCase ( self : List[Any] ) -> Tuple: lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase__ , 'image_mean' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , 'image_std' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , 'do_normalize' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , 'do_resize' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , 'size' ) ) def __UpperCAmelCase ( self : int ) -> Optional[int]: pass def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]: # Initialize image_processor lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , Image.Image ) # Test not batched input lowerCAmelCase = image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['height'], self.image_proc_tester.size['width'], ) , ) # Test batched lowerCAmelCase = image_processor(UpperCAmelCase__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['height'], self.image_proc_tester.size['width'], ) , ) def __UpperCAmelCase ( self : str ) -> Tuple: # Initialize image_processor lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , np.ndarray ) # Test not batched input lowerCAmelCase = image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['height'], self.image_proc_tester.size['width'], ) , ) # Test batched lowerCAmelCase = image_processor(UpperCAmelCase__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['height'], self.image_proc_tester.size['width'], ) , ) def __UpperCAmelCase ( self : List[Any] ) -> int: # Initialize image_processor lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , torch.Tensor ) # Test not batched input lowerCAmelCase = image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['height'], self.image_proc_tester.size['width'], ) , ) # Test batched lowerCAmelCase = image_processor(UpperCAmelCase__ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['height'], self.image_proc_tester.size['width'], ) , )
4
'''simple docstring''' import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin __snake_case =random.Random() if is_torch_available(): import torch def a_ ( lowerCamelCase : Dict , lowerCamelCase : Dict=1.0 , lowerCamelCase : List[Any]=None , lowerCamelCase : Union[str, Any]=None ): if rng is None: lowerCAmelCase = global_rng lowerCAmelCase = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class UpperCAmelCase_ ( unittest.TestCase ): def __init__( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str]=7 , UpperCAmelCase__ : int=4_0_0 , UpperCAmelCase__ : int=2_0_0_0 , UpperCAmelCase__ : List[str]=1 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple=1_6_0_0_0 , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Union[str, Any]=True , ) -> Any: lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = min_seq_length lowerCAmelCase = max_seq_length lowerCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) lowerCAmelCase = feature_size lowerCAmelCase = padding_value lowerCAmelCase = sampling_rate lowerCAmelCase = return_attention_mask lowerCAmelCase = do_normalize def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]: return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def __UpperCAmelCase ( self : str , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Union[str, Any]=False ) -> Optional[Any]: def _flatten(UpperCAmelCase__ : int ): return list(itertools.chain(*UpperCAmelCase__ ) ) if equal_length: lowerCAmelCase = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size lowerCAmelCase = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: lowerCAmelCase = [np.asarray(UpperCAmelCase__ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCAmelCase_ ( __lowercase , unittest.TestCase ): lowerCamelCase : Dict = ASTFeatureExtractor def __UpperCAmelCase ( self : str ) -> Optional[int]: lowerCAmelCase = ASTFeatureExtractionTester(self ) def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]: # Tests that all call wrap to encode_plus and batch_encode_plus lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )] lowerCAmelCase = [np.asarray(UpperCAmelCase__ ) for speech_input in speech_inputs] # Test not batched input lowerCAmelCase = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values lowerCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) ) # Test batched lowerCAmelCase = feat_extract(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='np' ).input_values lowerCAmelCase = feat_extract(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(UpperCAmelCase__ , UpperCAmelCase__ ): self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. lowerCAmelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)] lowerCAmelCase = np.asarray(UpperCAmelCase__ ) lowerCAmelCase = feat_extract(UpperCAmelCase__ , return_tensors='np' ).input_values lowerCAmelCase = feat_extract(UpperCAmelCase__ , return_tensors='np' ).input_values for enc_seq_a, enc_seq_a in zip(UpperCAmelCase__ , UpperCAmelCase__ ): self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) ) @require_torch def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]: import torch lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCAmelCase = np.random.rand(1_0_0 ).astype(np.floataa ) lowerCAmelCase = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: lowerCAmelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) lowerCAmelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def __UpperCAmelCase ( self : int , UpperCAmelCase__ : str ) -> Tuple: from datasets import load_dataset lowerCAmelCase = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' ) # automatic decoding with librispeech lowerCAmelCase = ds.sort('id' ).select(range(UpperCAmelCase__ ) )[:num_samples]['audio'] return [x["array"] for x in speech_samples] @require_torch def __UpperCAmelCase ( self : str ) -> Optional[Any]: # fmt: off lowerCAmelCase = torch.tensor( [-0.9_894, -1.2_776, -0.9_066, -1.2_776, -0.9_349, -1.2_609, -1.0_386, -1.2_776, -1.1_561, -1.2_776, -1.2_052, -1.2_723, -1.2_190, -1.2_132, -1.2_776, -1.1_133, -1.1_953, -1.1_343, -1.1_584, -1.2_203, -1.1_770, -1.2_474, -1.2_381, -1.1_936, -0.9_270, -0.8_317, -0.8_049, -0.7_706, -0.7_565, -0.7_869] ) # fmt: on lowerCAmelCase = self._load_datasamples(1 ) lowerCAmelCase = ASTFeatureExtractor() lowerCAmelCase = feature_extractor(UpperCAmelCase__ , return_tensors='pt' ).input_values self.assertEquals(input_values.shape , (1, 1_0_2_4, 1_2_8) ) self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , UpperCAmelCase__ , atol=1E-4 ) )
4
1
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_download, hf_hub_url from PIL import Image from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() __UpperCAmelCase = logging.get_logger(__name__) def _snake_case ( lowercase__ : str ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :Any = SwinConfig( embed_dim=1_9_2 , depths=(2, 2, 1_8, 2) , num_heads=(6, 1_2, 2_4, 4_8) , window_size=1_2 , out_features=["""stage2""", """stage3""", """stage4"""] , ) lowerCAmelCase_ :str = DetaConfig( backbone_config=lowercase__ , num_queries=9_0_0 , encoder_ffn_dim=2_0_4_8 , decoder_ffn_dim=2_0_4_8 , num_feature_levels=5 , assign_first_stage=lowercase__ , with_box_refine=lowercase__ , two_stage=lowercase__ , ) # set labels lowerCAmelCase_ :List[Any] = """huggingface/label-files""" if "o365" in model_name: lowerCAmelCase_ :Union[str, Any] = 3_6_6 lowerCAmelCase_ :Union[str, Any] = """object365-id2label.json""" else: lowerCAmelCase_ :Any = 9_1 lowerCAmelCase_ :Optional[Any] = """coco-detection-id2label.json""" lowerCAmelCase_ :str = num_labels lowerCAmelCase_ :List[str] = json.load(open(cached_download(hf_hub_url(lowercase__ , lowercase__ , repo_type="""dataset""" ) ) , """r""" ) ) lowerCAmelCase_ :Union[str, Any] = {int(lowercase__ ): v for k, v in idalabel.items()} lowerCAmelCase_ :str = idalabel lowerCAmelCase_ :Optional[Any] = {v: k for k, v in idalabel.items()} return config def _snake_case ( lowercase__ : Optional[int] ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :Dict = [] # stem # fmt: off rename_keys.append(("""backbone.0.body.patch_embed.proj.weight""", """model.backbone.model.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.0.body.patch_embed.proj.bias""", """model.backbone.model.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.0.body.patch_embed.norm.weight""", """model.backbone.model.embeddings.norm.weight""") ) rename_keys.append(("""backbone.0.body.patch_embed.norm.bias""", """model.backbone.model.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") ) if i < 3: rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.reduction.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.bias""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") ) rename_keys.append(("""backbone.0.body.norm1.weight""", """model.backbone.model.hidden_states_norms.stage2.weight""") ) rename_keys.append(("""backbone.0.body.norm1.bias""", """model.backbone.model.hidden_states_norms.stage2.bias""") ) rename_keys.append(("""backbone.0.body.norm2.weight""", """model.backbone.model.hidden_states_norms.stage3.weight""") ) rename_keys.append(("""backbone.0.body.norm2.bias""", """model.backbone.model.hidden_states_norms.stage3.bias""") ) rename_keys.append(("""backbone.0.body.norm3.weight""", """model.backbone.model.hidden_states_norms.stage4.weight""") ) rename_keys.append(("""backbone.0.body.norm3.bias""", """model.backbone.model.hidden_states_norms.stage4.bias""") ) # transformer encoder for i in range(config.encoder_layers ): rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", f"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", f"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", f"""model.encoder.layers.{i}.self_attn.value_proj.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", f"""model.encoder.layers.{i}.self_attn.value_proj.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", f"""model.encoder.layers.{i}.self_attn.output_proj.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", f"""model.encoder.layers.{i}.self_attn.output_proj.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.weight""", f"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""model.encoder.layers.{i}.fc1.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""model.encoder.layers.{i}.fc1.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""model.encoder.layers.{i}.fc2.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""model.encoder.layers.{i}.fc2.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""model.encoder.layers.{i}.final_layer_norm.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""model.encoder.layers.{i}.final_layer_norm.bias""") ) # transformer decoder for i in range(config.decoder_layers ): rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.weight""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""model.decoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""model.decoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.weight""", f"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.bias""", f"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""model.decoder.layers.{i}.fc1.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""model.decoder.layers.{i}.fc1.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""model.decoder.layers.{i}.fc2.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""model.decoder.layers.{i}.fc2.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""model.decoder.layers.{i}.final_layer_norm.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""model.decoder.layers.{i}.final_layer_norm.bias""") ) # fmt: on return rename_keys def _snake_case ( lowercase__ : int , lowercase__ : Union[str, Any] , lowercase__ : List[Any] ) -> Dict: '''simple docstring''' lowerCAmelCase_ :str = dct.pop(lowercase__ ) lowerCAmelCase_ :int = val def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> str: '''simple docstring''' lowerCAmelCase_ :int = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): lowerCAmelCase_ :Optional[int] = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) lowerCAmelCase_ :Tuple = state_dict.pop(f"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight""" ) lowerCAmelCase_ :Any = state_dict.pop(f"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase_ :Optional[Any] = in_proj_weight[:dim, :] lowerCAmelCase_ :Dict = in_proj_bias[: dim] lowerCAmelCase_ :Optional[Any] = in_proj_weight[ dim : dim * 2, : ] lowerCAmelCase_ :List[Any] = in_proj_bias[ dim : dim * 2 ] lowerCAmelCase_ :List[str] = in_proj_weight[ -dim :, : ] lowerCAmelCase_ :List[Any] = in_proj_bias[-dim :] # fmt: on def _snake_case ( lowercase__ : str , lowercase__ : int ) -> Any: '''simple docstring''' lowerCAmelCase_ :Any = config.d_model for i in range(config.decoder_layers ): # read in weights + bias of input projection layer of self-attention lowerCAmelCase_ :List[Any] = state_dict.pop(f"""transformer.decoder.layers.{i}.self_attn.in_proj_weight""" ) lowerCAmelCase_ :Union[str, Any] = state_dict.pop(f"""transformer.decoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase_ :List[str] = in_proj_weight[:hidden_size, :] lowerCAmelCase_ :Optional[int] = in_proj_bias[:hidden_size] lowerCAmelCase_ :Optional[int] = in_proj_weight[ hidden_size : hidden_size * 2, : ] lowerCAmelCase_ :int = in_proj_bias[hidden_size : hidden_size * 2] lowerCAmelCase_ :Dict = in_proj_weight[-hidden_size:, :] lowerCAmelCase_ :List[Any] = in_proj_bias[-hidden_size:] def _snake_case ( ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :Any = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCAmelCase_ :Tuple = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ) return im @torch.no_grad() def _snake_case ( lowercase__ : Dict , lowercase__ : Optional[Any] , lowercase__ : List[str] ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :str = get_deta_config(lowercase__ ) # load original state dict if model_name == "deta-swin-large": lowerCAmelCase_ :str = hf_hub_download(repo_id="""nielsr/deta-checkpoints""" , filename="""adet_swin_ft.pth""" ) elif model_name == "deta-swin-large-o365": lowerCAmelCase_ :Union[str, Any] = hf_hub_download(repo_id="""jozhang97/deta-swin-l-o365""" , filename="""deta_swin_pt_o365.pth""" ) else: raise ValueError(f"""Model name {model_name} not supported""" ) lowerCAmelCase_ :int = torch.load(lowercase__ , map_location="""cpu""" )["""model"""] # original state dict for name, param in state_dict.items(): print(lowercase__ , param.shape ) # rename keys lowerCAmelCase_ :Any = create_rename_keys(lowercase__ ) for src, dest in rename_keys: rename_key(lowercase__ , lowercase__ , lowercase__ ) read_in_swin_q_k_v(lowercase__ , config.backbone_config ) read_in_decoder_q_k_v(lowercase__ , lowercase__ ) # fix some prefixes for key in state_dict.copy().keys(): if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key: lowerCAmelCase_ :List[str] = state_dict.pop(lowercase__ ) lowerCAmelCase_ :Optional[int] = val if "input_proj" in key: lowerCAmelCase_ :str = state_dict.pop(lowercase__ ) lowerCAmelCase_ :List[str] = val if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key: lowerCAmelCase_ :List[str] = state_dict.pop(lowercase__ ) lowerCAmelCase_ :Tuple = val # finally, create HuggingFace model and load state dict lowerCAmelCase_ :Any = DetaForObjectDetection(lowercase__ ) model.load_state_dict(lowercase__ ) model.eval() lowerCAmelCase_ :Optional[int] = """cuda""" if torch.cuda.is_available() else """cpu""" model.to(lowercase__ ) # load image processor lowerCAmelCase_ :Any = DetaImageProcessor(format="""coco_detection""" ) # verify our conversion on image lowerCAmelCase_ :str = prepare_img() lowerCAmelCase_ :int = processor(images=lowercase__ , return_tensors="""pt""" ) lowerCAmelCase_ :int = encoding["""pixel_values"""] lowerCAmelCase_ :Tuple = model(pixel_values.to(lowercase__ ) ) # verify logits print("""Logits:""" , outputs.logits[0, :3, :3] ) print("""Boxes:""" , outputs.pred_boxes[0, :3, :3] ) if model_name == "deta-swin-large": lowerCAmelCase_ :Optional[int] = torch.tensor( [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] ) lowerCAmelCase_ :Union[str, Any] = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] ) elif model_name == "deta-swin-large-o365": lowerCAmelCase_ :Union[str, Any] = torch.tensor( [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] ) lowerCAmelCase_ :Optional[int] = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] ) assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(lowercase__ ) , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(lowercase__ ) , atol=1E-4 ) print("""Everything ok!""" ) if pytorch_dump_folder_path: # Save model and processor logger.info(f"""Saving PyTorch model and processor to {pytorch_dump_folder_path}...""" ) Path(lowercase__ ).mkdir(exist_ok=lowercase__ ) model.save_pretrained(lowercase__ ) processor.save_pretrained(lowercase__ ) # Push to hub if push_to_hub: print("""Pushing model and processor to hub...""" ) model.push_to_hub(f"""jozhang97/{model_name}""" ) processor.push_to_hub(f"""jozhang97/{model_name}""" ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument( '--model_name', type=str, default='deta-swin-large', choices=['deta-swin-large', 'deta-swin-large-o365'], help='Name of the model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __UpperCAmelCase = parser.parse_args() convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
1
"""simple docstring""" from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ ): UpperCAmelCase_ :List[str] = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"] @register_to_config def __init__( self , __A , __A , __A = None , __A = 5_0257 , __A = 1024 , __A = 768 , __A = 12 , __A = 12 , __A = None , __A = "gelu_new" , __A = 0.1 , __A = 0.1 , __A = 0.1 , __A = 1E-5 , __A = 0.0_2 , __A = True , __A = True , __A = False , __A = False , ) -> Optional[Any]: super().__init__() lowerCAmelCase_ :List[str] = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and""" f""" `n_embd`: {n_embd} are not equal.""" ) lowerCAmelCase_ :Union[str, Any] = prefix_inner_dim lowerCAmelCase_ :str = prefix_hidden_dim lowerCAmelCase_ :str = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) lowerCAmelCase_ :List[Any] = ( nn.Linear(self.prefix_hidden_dim , __A ) if self.prefix_hidden_dim is not None else nn.Identity() ) lowerCAmelCase_ :Any = GPTaConfig( vocab_size=__A , n_positions=__A , n_embd=__A , n_layer=__A , n_head=__A , n_inner=__A , activation_function=__A , resid_pdrop=__A , embd_pdrop=__A , attn_pdrop=__A , layer_norm_epsilon=__A , initializer_range=__A , scale_attn_weights=__A , use_cache=__A , scale_attn_by_inverse_layer_idx=__A , reorder_and_upcast_attn=__A , ) lowerCAmelCase_ :Any = GPTaLMHeadModel(__A ) def __lowerCAmelCase ( self , __A , __A , __A = None , __A = None , ) -> List[str]: lowerCAmelCase_ :str = self.transformer.transformer.wte(__A ) lowerCAmelCase_ :Any = self.encode_prefix(__A ) lowerCAmelCase_ :Optional[Any] = self.decode_prefix(__A ) lowerCAmelCase_ :Optional[int] = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: lowerCAmelCase_ :int = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) lowerCAmelCase_ :Optional[Any] = torch.cat((dummy_token, input_ids) , dim=1 ) lowerCAmelCase_ :Tuple = self.transformer(inputs_embeds=__A , labels=__A , attention_mask=__A ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def __lowerCAmelCase ( self , __A , __A ) -> torch.Tensor: return torch.zeros(__A , self.prefix_length , dtype=torch.intaa , device=__A ) def __lowerCAmelCase ( self , __A ) -> Optional[int]: return self.encode_prefix(__A ) @torch.no_grad() def __lowerCAmelCase ( self , __A , __A , __A ) -> Optional[int]: lowerCAmelCase_ :Tuple = torch.split(__A , 1 , dim=0 ) lowerCAmelCase_ :Optional[int] = [] lowerCAmelCase_ :List[str] = [] for feature in features: lowerCAmelCase_ :Tuple = self.decode_prefix(feature.to(__A ) ) # back to the clip feature # Only support beam search for now lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.generate_beam( input_embeds=__A , device=__A , eos_token_id=__A ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) lowerCAmelCase_ :Tuple = torch.stack(__A ) lowerCAmelCase_ :int = torch.stack(__A ) return generated_tokens, generated_seq_lengths @torch.no_grad() def __lowerCAmelCase ( self , __A=None , __A=None , __A=None , __A = 5 , __A = 67 , __A = 1.0 , __A = None , ) -> Union[str, Any]: lowerCAmelCase_ :Optional[int] = eos_token_id lowerCAmelCase_ :Optional[int] = None lowerCAmelCase_ :Any = None lowerCAmelCase_ :int = torch.ones(__A , device=__A , dtype=torch.int ) lowerCAmelCase_ :Optional[int] = torch.zeros(__A , device=__A , dtype=torch.bool ) if input_embeds is not None: lowerCAmelCase_ :List[str] = input_embeds else: lowerCAmelCase_ :Union[str, Any] = self.transformer.transformer.wte(__A ) for i in range(__A ): lowerCAmelCase_ :Optional[int] = self.transformer(inputs_embeds=__A ) lowerCAmelCase_ :str = outputs.logits lowerCAmelCase_ :str = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) lowerCAmelCase_ :Dict = logits.softmax(-1 ).log() if scores is None: lowerCAmelCase_ , lowerCAmelCase_ :Any = logits.topk(__A , -1 ) lowerCAmelCase_ :Union[str, Any] = generated.expand(__A , *generated.shape[1:] ) lowerCAmelCase_ , lowerCAmelCase_ :List[str] = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: lowerCAmelCase_ :List[str] = next_tokens else: lowerCAmelCase_ :List[Any] = tokens.expand(__A , *tokens.shape[1:] ) lowerCAmelCase_ :Any = torch.cat((tokens, next_tokens) , dim=1 ) else: lowerCAmelCase_ :List[Any] = -float(np.inf ) lowerCAmelCase_ :int = 0 lowerCAmelCase_ :Optional[int] = scores[:, None] + logits seq_lengths[~is_stopped] += 1 lowerCAmelCase_ :List[Any] = scores_sum / seq_lengths[:, None] lowerCAmelCase_ , lowerCAmelCase_ :Tuple = scores_sum_average.view(-1 ).topk(__A , -1 ) lowerCAmelCase_ :Optional[Any] = next_tokens // scores_sum.shape[1] lowerCAmelCase_ :Dict = seq_lengths[next_tokens_source] lowerCAmelCase_ :Tuple = next_tokens % scores_sum.shape[1] lowerCAmelCase_ :Optional[Any] = next_tokens.unsqueeze(1 ) lowerCAmelCase_ :str = tokens[next_tokens_source] lowerCAmelCase_ :List[Any] = torch.cat((tokens, next_tokens) , dim=1 ) lowerCAmelCase_ :Dict = generated[next_tokens_source] lowerCAmelCase_ :Dict = scores_sum_average * seq_lengths lowerCAmelCase_ :Tuple = is_stopped[next_tokens_source] lowerCAmelCase_ :str = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) lowerCAmelCase_ :List[Any] = torch.cat((generated, next_token_embed) , dim=1 ) lowerCAmelCase_ :Optional[int] = is_stopped + next_tokens.eq(__A ).squeeze() if is_stopped.all(): break lowerCAmelCase_ :str = scores / seq_lengths lowerCAmelCase_ :Optional[int] = scores.argsort(descending=__A ) # tokens tensors are already padded to max_seq_length lowerCAmelCase_ :Optional[Any] = [tokens[i] for i in order] lowerCAmelCase_ :Dict = torch.stack(__A , dim=0 ) lowerCAmelCase_ :Tuple = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
1
1
import warnings from ...utils import logging from .image_processing_flava import FlavaImageProcessor _A = logging.get_logger(__name__) class _lowerCAmelCase ( __lowerCamelCase ): def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple: warnings.warn( "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use FlavaImageProcessor instead." , _UpperCamelCase , ) super().__init__(*_UpperCamelCase , **_UpperCamelCase )
231
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase : Optional[int] = logging.get_logger(__name__) __UpperCAmelCase : Optional[int] = { "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json", "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json", "kssteven/ibert-roberta-large-mnli": ( "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json" ), } class __snake_case ( __lowerCamelCase ): '''simple docstring''' lowerCAmelCase__ = """ibert""" def __init__( self : Dict , A : Union[str, Any]=30_522 , A : List[Any]=768 , A : List[Any]=12 , A : Optional[int]=12 , A : Optional[Any]=3_072 , A : int="gelu" , A : str=0.1 , A : List[Any]=0.1 , A : Optional[Any]=512 , A : int=2 , A : Union[str, Any]=0.02 , A : List[str]=1E-12 , A : Optional[int]=1 , A : Optional[int]=0 , A : List[str]=2 , A : str="absolute" , A : Any=False , A : Optional[Any]="none" , **A : Any , ): super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A ) __snake_case: List[Any] = vocab_size __snake_case: Optional[Any] = hidden_size __snake_case: List[str] = num_hidden_layers __snake_case: Tuple = num_attention_heads __snake_case: List[str] = hidden_act __snake_case: Optional[Any] = intermediate_size __snake_case: Tuple = hidden_dropout_prob __snake_case: List[str] = attention_probs_dropout_prob __snake_case: Any = max_position_embeddings __snake_case: int = type_vocab_size __snake_case: List[str] = initializer_range __snake_case: List[Any] = layer_norm_eps __snake_case: Optional[int] = position_embedding_type __snake_case: str = quant_mode __snake_case: Optional[int] = force_dequant class __snake_case ( __lowerCamelCase ): '''simple docstring''' @property def UpperCAmelCase__ ( self : Tuple ): if self.task == "multiple-choice": __snake_case: List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: __snake_case: List[str] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
111
0
def _A ( lowerCAmelCase_ : str , lowerCAmelCase_ : str ): """simple docstring""" lowerCAmelCase__ = len(lowerCAmelCase_ ) lowerCAmelCase__ = len(lowerCAmelCase_ ) lowerCAmelCase__ = ( first_str_length if first_str_length > second_str_length else second_str_length ) lowerCAmelCase__ = [] for char_count in range(lowerCAmelCase_ ): if char_count < first_str_length: output_list.append(first_str[char_count] ) if char_count < second_str_length: output_list.append(second_str[char_count] ) return "".join(lowerCAmelCase_ ) if __name__ == "__main__": print(alternative_string_arrange('AB', 'XYZ'), end=' ')
221
from maths.prime_factors import prime_factors def _A ( lowerCAmelCase_ : int ): """simple docstring""" if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): lowerCAmelCase__ = F'Input value of [number={number}] must be an integer' raise TypeError(lowerCAmelCase_ ) if number < 1: raise ValueError("Input must be a positive integer" ) return -1 if len(prime_factors(lowerCAmelCase_ ) ) % 2 else 1 if __name__ == "__main__": import doctest doctest.testmod()
221
1
'''simple docstring''' from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): import torch if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm __snake_case : Optional[Any] = logging.get_logger(__name__) @dataclass class lowerCamelCase ( __a ): '''simple docstring''' __snake_case = [ 'no_inference', 'no_cuda', 'no_tpu', 'no_speed', 'no_memory', 'no_env_print', 'no_multi_process', ] def __init__( self : Optional[int] , **lowerCAmelCase_ : Any ) -> Tuple: '''simple docstring''' for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: A__ : Dict =deprecated_arg[3:] setattr(self , a_ , not kwargs.pop(a_ ) ) logger.warning( f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or" f" {positive_arg}={kwargs[positive_arg]}" ) A__ : List[Any] =kwargs.pop("""torchscript""" , self.torchscript ) A__ : int =kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics ) A__ : Optional[int] =kwargs.pop("""fp16_opt_level""" , self.fpaa_opt_level ) super().__init__(**a_ ) __snake_case = field(default=__a , metadata={'help': 'Trace the models using torchscript'} ) __snake_case = field(default=__a , metadata={'help': 'Print Xla/PyTorch tpu metrics'} ) __snake_case = field( default='O1' , metadata={ 'help': ( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. ' 'See details at https://nvidia.github.io/apex/amp.html' ) } , ) @cached_property def lowercase__ ( self : List[str] ) -> str: '''simple docstring''' requires_backends(self , ["""torch"""] ) logger.info("""PyTorch: setting up devices""" ) if not self.cuda: A__ : List[Any] =torch.device("""cpu""" ) A__ : int =0 elif is_torch_tpu_available(): A__ : Any =xm.xla_device() A__ : int =0 else: A__ : List[str] =torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) A__ : Dict =torch.cuda.device_count() return device, n_gpu @property def lowercase__ ( self : List[Any] ) -> Any: '''simple docstring''' return is_torch_tpu_available() and self.tpu @property def lowercase__ ( self : Dict ) -> Any: '''simple docstring''' requires_backends(self , ["""torch"""] ) # TODO(PVP): currently only single GPU is supported return torch.cuda.current_device() @property def lowercase__ ( self : str ) -> Any: '''simple docstring''' requires_backends(self , ["""torch"""] ) return self._setup_devices[0] @property def lowercase__ ( self : Any ) -> List[str]: '''simple docstring''' requires_backends(self , ["""torch"""] ) return self._setup_devices[1] @property def lowercase__ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' return self.n_gpu > 0
134
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : Union[str, Any] ): """simple docstring""" stooge(snake_case__ , 0 , len(snake_case__ ) - 1 ) return arr def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : int ): """simple docstring""" if i >= h: return # If first element is smaller than the last then swap them if arr[i] > arr[h]: _snake_case , _snake_case : Tuple = arr[h], arr[i] # If there are more than 2 elements in the array if h - i + 1 > 2: _snake_case : Dict = (int)((h - i + 1) / 3 ) # Recursively sort first 2/3 elements stooge(snake_case__ , snake_case__ , (h - t) ) # Recursively sort last 2/3 elements stooge(snake_case__ , i + t , (snake_case__) ) # Recursively sort first 2/3 elements stooge(snake_case__ , snake_case__ , (h - t) ) if __name__ == "__main__": A_ = input('''Enter numbers separated by a comma:\n''').strip() A_ = [int(item) for item in user_input.split(''',''')] print(stooge_sort(unsorted))
64
0
"""simple docstring""" from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING _UpperCamelCase : List[Any] = logging.get_logger(__name__) _UpperCamelCase : int = Dict[str, Any] _UpperCamelCase : int = List[Prediction] @add_end_docstrings(UpperCAmelCase ) class snake_case ( UpperCAmelCase ): def __init__( self : int , *A : List[str] , **A : Any ): '''simple docstring''' super().__init__(*__A , **__A ) if self.framework == "tf": raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' ) requires_backends(self , 'vision' ) self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) def lowerCamelCase__ ( self : Union[str, Any] , **A : Optional[int] ): '''simple docstring''' a : Optional[Any] = {} if "threshold" in kwargs: a : Dict = kwargs['threshold'] return {}, {}, postprocess_kwargs def __call__( self : Any , *A : Optional[Any] , **A : Union[str, Any] ): '''simple docstring''' return super().__call__(*__A , **__A ) def lowerCamelCase__ ( self : Union[str, Any] , A : str ): '''simple docstring''' a : Tuple = load_image(__A ) a : Dict = torch.IntTensor([[image.height, image.width]] ) a : int = self.image_processor(images=[image] , return_tensors='pt' ) if self.tokenizer is not None: a : List[str] = self.tokenizer(text=inputs['words'] , boxes=inputs['boxes'] , return_tensors='pt' ) a : Any = target_size return inputs def lowerCamelCase__ ( self : str , A : Any ): '''simple docstring''' a : Union[str, Any] = model_inputs.pop('target_size' ) a : Tuple = self.model(**__A ) a : Any = outputs.__class__({'target_size': target_size, **outputs} ) if self.tokenizer is not None: a : str = model_inputs['bbox'] return model_outputs def lowerCamelCase__ ( self : Tuple , A : Tuple , A : Dict=0.9 ): '''simple docstring''' a : str = model_outputs['target_size'] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. a, a : Dict = target_size[0].tolist() def unnormalize(A : str ): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 1_0_0_0), (height * bbox[1] / 1_0_0_0), (width * bbox[2] / 1_0_0_0), (height * bbox[3] / 1_0_0_0), ] ) ) a, a : Union[str, Any] = model_outputs['logits'].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 ) a : Tuple = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] a : List[Any] = [unnormalize(__A ) for bbox in model_outputs['bbox'].squeeze(0 )] a : Any = ['score', 'label', 'box'] a : str = [dict(zip(__A , __A ) ) for vals in zip(scores.tolist() , __A , __A ) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel a : Dict = self.image_processor.post_process_object_detection(__A , __A , __A ) a : Any = raw_annotations[0] a : List[Any] = raw_annotation['scores'] a : str = raw_annotation['labels'] a : Optional[Any] = raw_annotation['boxes'] a : Dict = scores.tolist() a : Optional[Any] = [self.model.config.idalabel[label.item()] for label in labels] a : Dict = [self._get_bounding_box(__A ) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] a : Optional[Any] = ['score', 'label', 'box'] a : Tuple = [ dict(zip(__A , __A ) ) for vals in zip(raw_annotation['scores'] , raw_annotation['labels'] , raw_annotation['boxes'] ) ] return annotation def lowerCamelCase__ ( self : Optional[int] , A : "torch.Tensor" ): '''simple docstring''' if self.framework != "pt": raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.' ) a, a, a, a : Optional[Any] = box.int().tolist() a : Dict = { 'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax, } return bbox
359
"""simple docstring""" import inspect import unittest from transformers import SegformerConfig, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_MAPPING, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerModel, ) from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import SegformerImageProcessor class snake_case ( UpperCAmelCase ): def lowerCamelCase__ ( self : Dict ): '''simple docstring''' a : Any = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(A , 'hidden_sizes' ) ) self.parent.assertTrue(hasattr(A , 'num_attention_heads' ) ) self.parent.assertTrue(hasattr(A , 'num_encoder_blocks' ) ) class snake_case : def __init__( self : List[Any] , A : Dict , A : List[Any]=1_3 , A : str=6_4 , A : Union[str, Any]=3 , A : Union[str, Any]=4 , A : Union[str, Any]=[2, 2, 2, 2] , A : List[str]=[8, 4, 2, 1] , A : Optional[Any]=[1_6, 3_2, 6_4, 1_2_8] , A : Optional[Any]=[1, 4, 8, 1_6] , A : Tuple=[1, 2, 4, 8] , A : Optional[Any]=True , A : Any=True , A : Optional[Any]="gelu" , A : Optional[int]=0.1 , A : List[Any]=0.1 , A : List[str]=0.02 , A : List[Any]=3 , A : str=None , ): '''simple docstring''' a : Optional[Any] = parent a : Optional[Any] = batch_size a : Optional[Any] = image_size a : Optional[int] = num_channels a : List[str] = num_encoder_blocks a : Optional[Any] = sr_ratios a : Any = depths a : Any = hidden_sizes a : Union[str, Any] = downsampling_rates a : Any = num_attention_heads a : int = is_training a : Dict = use_labels a : str = hidden_act a : Optional[int] = hidden_dropout_prob a : Union[str, Any] = attention_probs_dropout_prob a : Optional[Any] = initializer_range a : Dict = num_labels a : Union[str, Any] = scope def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' a : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a : int = None if self.use_labels: a : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) a : str = self.get_config() return config, pixel_values, labels def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return SegformerConfig( image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def lowerCamelCase__ ( self : int , A : str , A : List[Any] , A : List[Any] ): '''simple docstring''' a : Optional[Any] = SegformerModel(config=A ) model.to(A ) model.eval() a : Union[str, Any] = model(A ) a : Optional[int] = self.image_size // (self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def lowerCamelCase__ ( self : Optional[int] , A : Union[str, Any] , A : str , A : Optional[Any] ): '''simple docstring''' a : List[Any] = self.num_labels a : Optional[int] = SegformerForSemanticSegmentation(A ) model.to(A ) model.eval() a : str = model(A ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) a : int = model(A , labels=A ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) self.parent.assertGreater(result.loss , 0.0 ) def lowerCamelCase__ ( self : Dict , A : Dict , A : Any , A : Optional[Any] ): '''simple docstring''' a : Optional[int] = 1 a : List[Any] = SegformerForSemanticSegmentation(config=A ) model.to(A ) model.eval() a : Any = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(A ) a : Dict = model(A , labels=A ) self.parent.assertGreater(result.loss , 0.0 ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' a : str = self.prepare_config_and_inputs() a, a, a : str = config_and_inputs a : Dict = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class snake_case ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): __magic_name__ = ( ( SegformerModel, SegformerForSemanticSegmentation, SegformerForImageClassification, ) if is_torch_available() else () ) __magic_name__ = ( { '''feature-extraction''': SegformerModel, '''image-classification''': SegformerForImageClassification, '''image-segmentation''': SegformerForSemanticSegmentation, } if is_torch_available() else {} ) __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False def lowerCamelCase__ ( self : Any ): '''simple docstring''' a : Union[str, Any] = SegformerModelTester(self ) a : Tuple = SegformerConfigTester(self , config_class=A ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' a : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' a : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_binary_image_segmentation(*A ) def lowerCamelCase__ ( self : int ): '''simple docstring''' a : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*A ) @unittest.skip('SegFormer does not use inputs_embeds' ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' pass @unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods' ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' pass def lowerCamelCase__ ( self : Dict ): '''simple docstring''' a, a : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a : Dict = model_class(A ) a : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a : List[str] = [*signature.parameters.keys()] a : Optional[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , A ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' a, a : Any = self.model_tester.prepare_config_and_inputs_for_common() a : Any = True for model_class in self.all_model_classes: a : Optional[Any] = True a : Tuple = False a : int = True a : Any = model_class(A ) model.to(A ) model.eval() with torch.no_grad(): a : Dict = model(**self._prepare_for_class(A , A ) ) a : Union[str, Any] = outputs.attentions a : Tuple = sum(self.model_tester.depths ) self.assertEqual(len(A ) , A ) # check that output_attentions also work using config del inputs_dict["output_attentions"] a : Tuple = True a : Optional[Any] = model_class(A ) model.to(A ) model.eval() with torch.no_grad(): a : str = model(**self._prepare_for_class(A , A ) ) a : Optional[int] = outputs.attentions self.assertEqual(len(A ) , A ) # verify the first attentions (first block, first layer) a : Union[str, Any] = (self.model_tester.image_size // 4) ** 2 a : List[str] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) # verify the last attentions (last block, last layer) a : Tuple = (self.model_tester.image_size // 3_2) ** 2 a : Tuple = (self.model_tester.image_size // (3_2 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , ) a : str = len(A ) # Check attention is always last and order is fine a : str = True a : Tuple = True a : List[str] = model_class(A ) model.to(A ) model.eval() with torch.no_grad(): a : Dict = model(**self._prepare_for_class(A , A ) ) self.assertEqual(out_len + 1 , len(A ) ) a : str = outputs.attentions self.assertEqual(len(A ) , A ) # verify the first attentions (first block, first layer) a : Union[str, Any] = (self.model_tester.image_size // 4) ** 2 a : Optional[int] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) def lowerCamelCase__ ( self : int ): '''simple docstring''' def check_hidden_states_output(A : Optional[Any] , A : List[str] , A : Union[str, Any] ): a : Optional[Any] = model_class(A ) model.to(A ) model.eval() with torch.no_grad(): a : Optional[Any] = model(**self._prepare_for_class(A , A ) ) a : Tuple = outputs.hidden_states a : Optional[Any] = self.model_tester.num_encoder_blocks self.assertEqual(len(A ) , A ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) a, a : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a : List[str] = True check_hidden_states_output(A , A , A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a : str = True check_hidden_states_output(A , A , A ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' if not self.model_tester.is_training: return a, a : List[str] = self.model_tester.prepare_config_and_inputs_for_common() a : List[Any] = True for model_class in self.all_model_classes: if model_class in get_values(A ): continue a : List[Any] = model_class(A ) model.to(A ) model.train() a : Tuple = self._prepare_for_class(A , A , return_labels=A ) a : Any = model(**A ).loss loss.backward() @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def lowerCamelCase__ ( self : str ): '''simple docstring''' pass @slow def lowerCamelCase__ ( self : int ): '''simple docstring''' for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a : Dict = SegformerModel.from_pretrained(A ) self.assertIsNotNone(A ) def snake_case (): '''simple docstring''' a : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch class snake_case ( unittest.TestCase ): @slow def lowerCamelCase__ ( self : Dict ): '''simple docstring''' a : int = SegformerImageProcessor( image_scale=(5_1_2, 5_1_2) , keep_ratio=A , align=A , do_random_crop=A ) a : Dict = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to( A ) a : str = prepare_img() a : List[str] = image_processor(images=A , return_tensors='pt' ) a : List[str] = encoded_inputs.pixel_values.to(A ) with torch.no_grad(): a : Optional[int] = model(A ) a : Any = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) ) self.assertEqual(outputs.logits.shape , A ) a : str = torch.tensor( [ [[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]], [[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]], [[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]], ] ).to(A ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , A , atol=1E-4 ) ) @slow def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' a : Optional[Any] = SegformerImageProcessor( image_scale=(5_1_2, 5_1_2) , keep_ratio=A , align=A , do_random_crop=A ) a : Optional[Any] = SegformerForSemanticSegmentation.from_pretrained( 'nvidia/segformer-b1-finetuned-cityscapes-1024-1024' ).to(A ) a : List[Any] = prepare_img() a : Optional[Any] = image_processor(images=A , return_tensors='pt' ) a : int = encoded_inputs.pixel_values.to(A ) with torch.no_grad(): a : Optional[Any] = model(A ) a : Tuple = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) ) self.assertEqual(outputs.logits.shape , A ) a : Optional[Any] = torch.tensor( [ [[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]], [[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]], [[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]], ] ).to(A ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , A , atol=1E-1 ) ) @slow def lowerCamelCase__ ( self : int ): '''simple docstring''' a : str = SegformerImageProcessor( image_scale=(5_1_2, 5_1_2) , keep_ratio=A , align=A , do_random_crop=A ) a : Optional[int] = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to( A ) a : int = prepare_img() a : Any = image_processor(images=A , return_tensors='pt' ) a : List[Any] = encoded_inputs.pixel_values.to(A ) with torch.no_grad(): a : str = model(A ) a : str = outputs.logits.detach().cpu() a : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=A , target_sizes=[(5_0_0, 3_0_0)] ) a : Dict = torch.Size((5_0_0, 3_0_0) ) self.assertEqual(segmentation[0].shape , A ) a : int = image_processor.post_process_semantic_segmentation(outputs=A ) a : Any = torch.Size((1_2_8, 1_2_8) ) self.assertEqual(segmentation[0].shape , A )
186
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging a__ : Tuple =logging.get_logger(__name__) a__ : str ={ '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''', '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''', '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''', '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''', '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''', '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''', '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''', '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''', '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''', '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''', } class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int ="rwkv" SCREAMING_SNAKE_CASE_ : Optional[int] ={"max_position_embeddings": "context_length"} def __init__( self : str , __A : Optional[int]=5_0_2_7_7 , __A : Any=1_0_2_4 , __A : Optional[int]=4_0_9_6 , __A : Optional[Any]=3_2 , __A : str=None , __A : Dict=None , __A : List[str]=1e-5 , __A : Dict=0 , __A : List[Any]=0 , __A : Dict=6 , __A : Optional[Any]=False , __A : Union[str, Any]=True , **__A : Tuple , ): __UpperCamelCase = vocab_size __UpperCamelCase = context_length __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = attention_hidden_size if attention_hidden_size is not None else hidden_size __UpperCamelCase = intermediate_size if intermediate_size is not None else 4 * hidden_size __UpperCamelCase = layer_norm_epsilon __UpperCamelCase = rescale_every __UpperCamelCase = use_cache __UpperCamelCase = bos_token_id __UpperCamelCase = eos_token_id super().__init__( tie_word_embeddings=__A , bos_token_id=__A , eos_token_id=__A , **__A )
53
import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class UpperCamelCase__ ( unittest.TestCase ): def lowerCAmelCase (self : Optional[Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() def lowerCAmelCase (self : int ): __a , __a : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained( '''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , ) __a : Any = '''A painting of a squirrel eating a burger''' __a : Dict = jax.device_count() __a : Optional[int] = num_samples * [prompt] __a : Optional[Any] = sd_pipe.prepare_inputs(snake_case_ ) __a : Optional[Any] = replicate(snake_case_ ) __a : Optional[int] = shard(snake_case_ ) __a : int = jax.random.PRNGKey(0 ) __a : str = jax.random.split(snake_case_ , jax.device_count() ) __a : int = sd_pipe(snake_case_ , snake_case_ , snake_case_ , num_inference_steps=2_5 , jit=snake_case_ )[0] assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3) __a : Optional[int] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __a : List[Any] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] __a : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __a : str = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.4_5508, 0.4512] ) print(f"output_slice: {output_slice}" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def lowerCAmelCase (self : Tuple ): __a : Optional[Any] = '''stabilityai/stable-diffusion-2''' __a , __a : Dict = FlaxDPMSolverMultistepScheduler.from_pretrained(snake_case_ , subfolder='''scheduler''' ) __a , __a : List[Any] = FlaxStableDiffusionPipeline.from_pretrained( snake_case_ , scheduler=snake_case_ , revision='''bf16''' , dtype=jnp.bfloataa , ) __a : Union[str, Any] = scheduler_params __a : List[Any] = '''A painting of a squirrel eating a burger''' __a : Any = jax.device_count() __a : Any = num_samples * [prompt] __a : List[Any] = sd_pipe.prepare_inputs(snake_case_ ) __a : Tuple = replicate(snake_case_ ) __a : Dict = shard(snake_case_ ) __a : Dict = jax.random.PRNGKey(0 ) __a : Dict = jax.random.split(snake_case_ , jax.device_count() ) __a : str = sd_pipe(snake_case_ , snake_case_ , snake_case_ , num_inference_steps=2_5 , jit=snake_case_ )[0] assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3) __a : List[Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __a : Any = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] __a : List[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __a : List[Any] = jnp.array([0.4336, 0.4_2969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] ) print(f"output_slice: {output_slice}" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
216
0
import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self : int ): __A = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split() __A = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) ) __A = { "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", } __A = { "feature_size": 1, "padding_value": 0.0, "sampling_rate": 1_60_00, "return_attention_mask": False, "do_normalize": True, } __A = tempfile.mkdtemp() __A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] ) __A = os.path.join(self.tmpdirname ,lowerCamelCase_ ) with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCamelCase_ ) + "\n" ) with open(self.feature_extraction_file ,"w" ,encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCamelCase_ ) + "\n" ) # load decoder from hub __A = "hf-internal-testing/ngram-beam-search-decoder" def UpperCamelCase_ ( self : Optional[Any] ,**A : str ): __A = self.add_kwargs_tokens_map.copy() kwargs.update(lowerCamelCase_ ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname ,**lowerCamelCase_ ) def UpperCamelCase_ ( self : Optional[int] ,**A : int ): return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname ,**lowerCamelCase_ ) def UpperCamelCase_ ( self : Any ,**A : Optional[Any] ): return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name ,**lowerCamelCase_ ) def UpperCamelCase_ ( self : Dict ): shutil.rmtree(self.tmpdirname ) def UpperCamelCase_ ( self : Any ): __A = self.get_tokenizer() __A = self.get_feature_extractor() __A = self.get_decoder() __A = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase_ ,feature_extractor=lowerCamelCase_ ,decoder=lowerCamelCase_ ) processor.save_pretrained(self.tmpdirname ) __A = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer ,lowerCamelCase_ ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor ,lowerCamelCase_ ) # decoder self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,) self.assertIsInstance(processor.decoder ,lowerCamelCase_ ) def UpperCamelCase_ ( self : List[str] ): __A = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match __A = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha ,5.0 ) self.assertEqual(processor.language_model.beta ,3.0 ) self.assertEqual(processor.language_model.score_boundary ,-7.0 ) self.assertEqual(processor.language_model.unk_score_offset ,3 ) def UpperCamelCase_ ( self : str ): __A = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(["xx"] ) with self.assertRaisesRegex(lowerCamelCase_ ,"include" ): WavaVecaProcessorWithLM( tokenizer=lowerCamelCase_ ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() ) def UpperCamelCase_ ( self : Union[str, Any] ): __A = self.get_feature_extractor() __A = self.get_tokenizer() __A = self.get_decoder() __A = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase_ ,feature_extractor=lowerCamelCase_ ,decoder=lowerCamelCase_ ) __A = floats_list((3, 10_00) ) __A = feature_extractor(lowerCamelCase_ ,return_tensors="np" ) __A = processor(lowerCamelCase_ ,return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 ) def UpperCamelCase_ ( self : Optional[int] ): __A = self.get_feature_extractor() __A = self.get_tokenizer() __A = self.get_decoder() __A = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase_ ,feature_extractor=lowerCamelCase_ ,decoder=lowerCamelCase_ ) __A = "This is a test string" __A = processor(text=lowerCamelCase_ ) __A = tokenizer(lowerCamelCase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def UpperCamelCase_ ( self : Optional[int] ,A : str=(2, 10, 16) ,A : Tuple=77 ): np.random.seed(lowerCamelCase_ ) return np.random.rand(*lowerCamelCase_ ) def UpperCamelCase_ ( self : str ): __A = self.get_feature_extractor() __A = self.get_tokenizer() __A = self.get_decoder() __A = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase_ ,feature_extractor=lowerCamelCase_ ,decoder=lowerCamelCase_ ) __A = self._get_dummy_logits(shape=(10, 16) ,seed=13 ) __A = processor.decode(lowerCamelCase_ ) __A = decoder.decode_beams(lowerCamelCase_ )[0] self.assertEqual(decoded_decoder[0] ,decoded_processor.text ) self.assertEqual("</s> <s> </s>" ,decoded_processor.text ) self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score ) @parameterized.expand([[None], ["fork"], ["spawn"]] ) def UpperCamelCase_ ( self : Any ,A : Optional[int] ): __A = self.get_feature_extractor() __A = self.get_tokenizer() __A = self.get_decoder() __A = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase_ ,feature_extractor=lowerCamelCase_ ,decoder=lowerCamelCase_ ) __A = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) if pool_context is None: __A = processor.batch_decode(lowerCamelCase_ ) else: with get_context(lowerCamelCase_ ).Pool() as pool: __A = processor.batch_decode(lowerCamelCase_ ,lowerCamelCase_ ) __A = list(lowerCamelCase_ ) with get_context("fork" ).Pool() as p: __A = decoder.decode_beams_batch(lowerCamelCase_ ,lowerCamelCase_ ) __A , __A , __A = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(lowerCamelCase_ ,decoded_processor.text ) self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] ,decoded_processor.text ) self.assertListEqual(lowerCamelCase_ ,decoded_processor.logit_score ) self.assertListEqual(lowerCamelCase_ ,decoded_processor.lm_score ) def UpperCamelCase_ ( self : int ): __A = self.get_feature_extractor() __A = self.get_tokenizer() __A = self.get_decoder() __A = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase_ ,feature_extractor=lowerCamelCase_ ,decoder=lowerCamelCase_ ) __A = self._get_dummy_logits() __A = 15 __A = -20.0 __A = -4.0 __A = processor.batch_decode( lowerCamelCase_ ,beam_width=lowerCamelCase_ ,beam_prune_logp=lowerCamelCase_ ,token_min_logp=lowerCamelCase_ ,) __A = decoded_processor_out.text __A = list(lowerCamelCase_ ) with get_context("fork" ).Pool() as pool: __A = decoder.decode_beams_batch( lowerCamelCase_ ,lowerCamelCase_ ,beam_width=lowerCamelCase_ ,beam_prune_logp=lowerCamelCase_ ,token_min_logp=lowerCamelCase_ ,) __A = [d[0][0] for d in decoded_decoder_out] __A = [d[0][2] for d in decoded_decoder_out] __A = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ ) self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] ,lowerCamelCase_ ) self.assertTrue(np.array_equal(lowerCamelCase_ ,decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-20.0_54, -18.4_47] ,lowerCamelCase_ ,atol=1E-3 ) ) self.assertTrue(np.array_equal(lowerCamelCase_ ,decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-15.5_54, -13.94_74] ,lowerCamelCase_ ,atol=1E-3 ) ) def UpperCamelCase_ ( self : Any ): __A = self.get_feature_extractor() __A = self.get_tokenizer() __A = self.get_decoder() __A = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase_ ,feature_extractor=lowerCamelCase_ ,decoder=lowerCamelCase_ ) __A = self._get_dummy_logits() __A = 2.0 __A = 5.0 __A = -20.0 __A = True __A = processor.batch_decode( lowerCamelCase_ ,alpha=lowerCamelCase_ ,beta=lowerCamelCase_ ,unk_score_offset=lowerCamelCase_ ,lm_score_boundary=lowerCamelCase_ ,) __A = decoded_processor_out.text __A = list(lowerCamelCase_ ) decoder.reset_params( alpha=lowerCamelCase_ ,beta=lowerCamelCase_ ,unk_score_offset=lowerCamelCase_ ,lm_score_boundary=lowerCamelCase_ ,) with get_context("fork" ).Pool() as pool: __A = decoder.decode_beams_batch( lowerCamelCase_ ,lowerCamelCase_ ,) __A = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ ) self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] ,lowerCamelCase_ ) __A = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha ,2.0 ) self.assertEqual(lm_model.beta ,5.0 ) self.assertEqual(lm_model.unk_score_offset ,-20.0 ) self.assertEqual(lm_model.score_boundary ,lowerCamelCase_ ) def UpperCamelCase_ ( self : Dict ): __A = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" ) __A = processor.decoder.model_container[processor.decoder._model_key] __A = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute() __A = os.listdir(lowerCamelCase_ ) __A = ["alphabet.json", "language_model"] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ ) def UpperCamelCase_ ( self : int ): __A = snapshot_download("hf-internal-testing/processor_with_lm" ) __A = WavaVecaProcessorWithLM.from_pretrained(lowerCamelCase_ ) __A = processor.decoder.model_container[processor.decoder._model_key] __A = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute() __A = os.listdir(lowerCamelCase_ ) __A = os.listdir(lowerCamelCase_ ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ ) def UpperCamelCase_ ( self : int ): __A = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" ) __A = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" ) __A = floats_list((3, 10_00) ) __A = processor_wavaveca(lowerCamelCase_ ,return_tensors="np" ) __A = processor_auto(lowerCamelCase_ ,return_tensors="np" ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1E-2 ) __A = self._get_dummy_logits() __A = processor_wavaveca.batch_decode(lowerCamelCase_ ) __A = processor_auto.batch_decode(lowerCamelCase_ ) self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text ) def UpperCamelCase_ ( self : str ): __A = self.get_feature_extractor() __A = self.get_tokenizer() __A = self.get_decoder() __A = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase_ ,feature_extractor=lowerCamelCase_ ,decoder=lowerCamelCase_ ) self.assertListEqual( processor.model_input_names ,feature_extractor.model_input_names ,msg="`processor` and `feature_extractor` model input names do not match" ,) @staticmethod def UpperCamelCase_ ( A : List[str] ,A : Tuple ): __A = [d[key] for d in offsets] return retrieved_list def UpperCamelCase_ ( self : List[str] ): __A = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" ) __A = self._get_dummy_logits()[0] __A = processor.decode(lowerCamelCase_ ,output_word_offsets=lowerCamelCase_ ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) ,4 ) self.assertTrue("text" in outputs ) self.assertTrue("word_offsets" in outputs ) self.assertTrue(isinstance(lowerCamelCase_ ,lowerCamelCase_ ) ) self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] ,"word" ) ) ,outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] ,"word" ) ,["<s>", "<s>", "</s>"] ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] ,"start_offset" ) ,[0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] ,"end_offset" ) ,[1, 3, 5] ) def UpperCamelCase_ ( self : int ): __A = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" ) __A = self._get_dummy_logits() __A = processor.batch_decode(lowerCamelCase_ ,output_word_offsets=lowerCamelCase_ ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) ,4 ) self.assertTrue("text" in outputs ) self.assertTrue("word_offsets" in outputs ) self.assertTrue(isinstance(lowerCamelCase_ ,lowerCamelCase_ ) ) self.assertListEqual( [" ".join(self.get_from_offsets(lowerCamelCase_ ,"word" ) ) for o in outputs["word_offsets"]] ,outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] ,"word" ) ,["<s>", "<s>", "</s>"] ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] ,"start_offset" ) ,[0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] ,"end_offset" ) ,[1, 3, 5] ) @slow @require_torch @require_torchaudio def UpperCamelCase_ ( self : Tuple ): import torch __A = load_dataset("common_voice" ,"en" ,split="train" ,streaming=lowerCamelCase_ ) __A = ds.cast_column("audio" ,datasets.Audio(sampling_rate=1_60_00 ) ) __A = iter(lowerCamelCase_ ) __A = next(lowerCamelCase_ ) __A = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" ) __A = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train __A = processor(sample["audio"]["array"] ,return_tensors="pt" ).input_values with torch.no_grad(): __A = model(lowerCamelCase_ ).logits.cpu().numpy() __A = processor.decode(logits[0] ,output_word_offsets=lowerCamelCase_ ) __A = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate __A = [ { "start_time": d["start_offset"] * time_offset, "end_time": d["end_offset"] * time_offset, "word": d["word"], } for d in output["word_offsets"] ] __A = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL" # output words self.assertEqual(" ".join(self.get_from_offsets(lowerCamelCase_ ,"word" ) ) ,lowerCamelCase_ ) self.assertEqual(" ".join(self.get_from_offsets(lowerCamelCase_ ,"word" ) ) ,output.text ) # output times __A = torch.tensor(self.get_from_offsets(lowerCamelCase_ ,"start_time" ) ) __A = torch.tensor(self.get_from_offsets(lowerCamelCase_ ,"end_time" ) ) # fmt: off __A = torch.tensor([1.41_99, 1.65_99, 2.25_99, 3.0, 3.24, 3.59_99, 3.79_99, 4.09_99, 4.26, 4.94, 5.28, 5.65_99, 5.78, 5.94, 6.32, 6.53_99, 6.65_99] ) __A = torch.tensor([1.53_99, 1.89_99, 2.9, 3.16, 3.53_99, 3.72, 4.01_99, 4.17_99, 4.76, 5.15_99, 5.55_99, 5.69_99, 5.86, 6.19_99, 6.38, 6.61_99, 6.94] ) # fmt: on self.assertTrue(torch.allclose(lowerCamelCase_ ,lowerCamelCase_ ,atol=0.01 ) ) self.assertTrue(torch.allclose(lowerCamelCase_ ,lowerCamelCase_ ,atol=0.01 ) )
355
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Value from .base import TaskTemplate @dataclass(frozen=__SCREAMING_SNAKE_CASE ) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} ) snake_case_ = Features({"text": Value("string" )} ) snake_case_ = Features({"labels": ClassLabel} ) snake_case_ = "text" snake_case_ = "labels" def UpperCamelCase_ ( self : Optional[Any] ,A : Dict ): if self.label_column not in features: raise ValueError(f'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column] ,A ): raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' ) __A = copy.deepcopy(self ) __A = self.label_schema.copy() __A = features[self.label_column] __A = label_schema return task_template @property def UpperCamelCase_ ( self : Dict ): return { self.text_column: "text", self.label_column: "labels", }
124
0
'''simple docstring''' import argparse from copy import deepcopy import numpy as np from datasets import ClassLabel, DatasetDict, load_dataset from evaluate import load from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, Trainer, TrainerCallback, TrainingArguments, set_seed, ) def _lowerCamelCase ( ) -> Dict: _a = argparse.ArgumentParser() parser.add_argument("--model_ckpt" , type=_a , default="microsoft/unixcoder-base-nine" ) parser.add_argument("--num_epochs" , type=_a , default=5 ) parser.add_argument("--batch_size" , type=_a , default=6 ) parser.add_argument("--gradient_accumulation_steps" , type=_a , default=1 ) parser.add_argument("--freeze" , type=_a , default=_a ) parser.add_argument("--learning_rate" , type=_a , default=5E-4 ) parser.add_argument("--seed" , type=_a , default=0 ) parser.add_argument("--lr_scheduler_type" , type=_a , default="cosine" ) parser.add_argument("--num_warmup_steps" , type=_a , default=10 ) parser.add_argument("--weight_decay" , type=_a , default=0.01 ) parser.add_argument("--output_dir" , type=_a , default="./results" ) return parser.parse_args() lowerCAmelCase_ : List[str] = load('accuracy') def _lowerCamelCase ( lowercase : Dict ) -> Optional[Any]: _a = eval_pred _a = np.argmax(_a , axis=1 ) return metric.compute(predictions=_a , references=_a ) class __SCREAMING_SNAKE_CASE (_lowercase ): """simple docstring""" def __init__( self : str , __a : List[str] ): super().__init__() _a = trainer def UpperCamelCase__ ( self : Any , __a : List[str] , __a : Tuple , __a : Any , **__a : Optional[int] ): if control.should_evaluate: _a = deepcopy(_SCREAMING_SNAKE_CASE ) self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" ) return control_copy def _lowerCamelCase ( ) -> int: _a = get_args() set_seed(args.seed ) _a = load_dataset("codeparrot/codecomplex" , split="train" ) _a = dataset.train_test_split(test_size=0.2 ) _a = train_test['''test'''].train_test_split(test_size=0.5 ) _a = DatasetDict( { "train": train_test["train"], "test": test_validation["train"], "valid": test_validation["test"], } ) print("Loading tokenizer and model" ) _a = AutoTokenizer.from_pretrained(args.model_ckpt ) _a = tokenizer.eos_token _a = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 ) _a = model.config.eos_token_id if args.freeze: for param in model.roberta.parameters(): _a = False _a = ClassLabel(num_classes=7 , names=list(set(train_test_validation["train"]["complexity"] ) ) ) def tokenize(lowercase : Dict ): _a = tokenizer(example["src"] , truncation=_a , max_length=1024 ) _a = labels.straint(example["complexity"] ) return { "input_ids": inputs["input_ids"], "attention_mask": inputs["attention_mask"], "label": label, } _a = train_test_validation.map( _a , batched=_a , remove_columns=train_test_validation["train"].column_names , ) _a = DataCollatorWithPadding(tokenizer=_a ) _a = TrainingArguments( output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="epoch" , save_strategy="epoch" , logging_strategy="epoch" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model="accuracy" , run_name="complexity-java" , report_to="wandb" , ) _a = Trainer( model=_a , args=_a , train_dataset=tokenized_datasets["train"] , eval_dataset=tokenized_datasets["valid"] , tokenizer=_a , data_collator=_a , compute_metrics=_a , ) print("Training..." ) trainer.add_callback(CustomCallback(_a ) ) trainer.train() if __name__ == "__main__": main()
63
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import Callable, Dict, List, Tuple import timm import torch import torch.nn as nn from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf from huggingface_hub import cached_download, hf_hub_url from torch import Tensor from vissl.models.model_helpers import get_trunk_forward_outputs from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase = logging.get_logger() @dataclass class _a : _a : nn.Module _a : List[nn.Module] = field(default_factory=_lowercase) _a : list = field(default_factory=_lowercase) def UpperCAmelCase__( self : Tuple , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Tensor , _SCREAMING_SNAKE_CASE : Tensor )-> Any: lowerCAmelCase__ : str = len(list(m.modules() ) ) == 1 or isinstance(_SCREAMING_SNAKE_CASE , nn.Convad ) or isinstance(_SCREAMING_SNAKE_CASE , nn.BatchNormad ) if has_not_submodules: self.traced.append(_SCREAMING_SNAKE_CASE ) def __call__( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : Tensor )-> str: for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(_SCREAMING_SNAKE_CASE ) [x.remove() for x in self.handles] return self @property def UpperCAmelCase__( self : Any )-> Union[str, Any]: # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda _SCREAMING_SNAKE_CASE : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class _a : _a : nn.Module _a : nn.Module _a : int = 1 _a : List = field(default_factory=_lowercase) _a : List = field(default_factory=_lowercase) _a : bool = True def __call__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : Tensor )-> str: lowerCAmelCase__ : List[Any] = Tracker(self.dest )(_SCREAMING_SNAKE_CASE ).parametrized lowerCAmelCase__ : str = Tracker(self.src )(_SCREAMING_SNAKE_CASE ).parametrized lowerCAmelCase__ : List[str] = list(filter(lambda _SCREAMING_SNAKE_CASE : type(_SCREAMING_SNAKE_CASE ) not in self.src_skip , _SCREAMING_SNAKE_CASE ) ) lowerCAmelCase__ : str = list(filter(lambda _SCREAMING_SNAKE_CASE : type(_SCREAMING_SNAKE_CASE ) not in self.dest_skip , _SCREAMING_SNAKE_CASE ) ) if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ) and self.raise_if_mismatch: raise Exception( F'Numbers of operations are different. Source module has {len(_SCREAMING_SNAKE_CASE )} operations while' F' destination module has {len(_SCREAMING_SNAKE_CASE )}.' ) for dest_m, src_m in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F'Transfered from={src_m} to={dest_m}' ) class _a ( nn.Module): def __init__( self : List[Any] , _SCREAMING_SNAKE_CASE : nn.Module )-> Optional[int]: super().__init__() lowerCAmelCase__ : List[Tuple[str, nn.Module]] = [] # - get the stem feature_blocks.append(('''conv1''', model.stem) ) # - get all the feature blocks for k, v in model.trunk_output.named_children(): assert k.startswith('''block''' ), F'Unexpected layer name {k}' lowerCAmelCase__ : Optional[int] = len(_SCREAMING_SNAKE_CASE ) + 1 feature_blocks.append((F'res{block_index}', v) ) lowerCAmelCase__ : List[str] = nn.ModuleDict(_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__( self : int , _SCREAMING_SNAKE_CASE : Tensor )-> List[str]: return get_trunk_forward_outputs( _SCREAMING_SNAKE_CASE , out_feat_keys=_SCREAMING_SNAKE_CASE , feature_blocks=self._feature_blocks , ) class _a ( _lowercase): def UpperCAmelCase__( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : str )-> str: lowerCAmelCase__ : int = x.split('''-''' ) return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] ) def __getitem__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : str )-> Callable[[], Tuple[nn.Module, Dict]]: # default to timm! if x not in self: lowerCAmelCase__ : Optional[Any] = self.convert_name_to_timm(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ : List[str] = partial(lambda: (timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ).eval(), None) ) else: lowerCAmelCase__ : Any = super().__getitem__(_SCREAMING_SNAKE_CASE ) return val class _a ( _lowercase): def __getitem__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : str )-> Callable[[], nn.Module]: if "seer" in x and "in1k" not in x: lowerCAmelCase__ : int = RegNetModel else: lowerCAmelCase__ : List[str] = RegNetForImageClassification return val def lowerCamelCase_ ( _a , _a , _a ): """simple docstring""" for from_key, to_key in keys: lowerCAmelCase__ : Optional[Any] = from_state_dict[from_key].clone() print(f'Copied key={from_key} to={to_key}' ) return to_state_dict def lowerCamelCase_ ( _a , _a , _a , _a , _a , _a = True , ): """simple docstring""" print(f'Converting {name}...' ) with torch.no_grad(): lowerCAmelCase__ , lowerCAmelCase__ : int = from_model_func() lowerCAmelCase__ : Optional[Any] = our_model_func(_a ).eval() lowerCAmelCase__ : int = ModuleTransfer(src=_a , dest=_a , raise_if_mismatch=_a ) lowerCAmelCase__ : str = torch.randn((1, 3, 224, 224) ) module_transfer(_a ) if from_state_dict is not None: lowerCAmelCase__ : Any = [] # for seer - in1k finetuned we have to manually copy the head if "seer" in name and "in1k" in name: lowerCAmelCase__ : List[Any] = [('''0.clf.0.weight''', '''classifier.1.weight'''), ('''0.clf.0.bias''', '''classifier.1.bias''')] lowerCAmelCase__ : int = manually_copy_vissl_head(_a , our_model.state_dict() , _a ) our_model.load_state_dict(_a ) lowerCAmelCase__ : List[str] = our_model(_a , output_hidden_states=_a ) lowerCAmelCase__ : Dict = ( our_outputs.logits if isinstance(_a , _a ) else our_outputs.last_hidden_state ) lowerCAmelCase__ : Tuple = from_model(_a ) lowerCAmelCase__ : int = from_output[-1] if type(_a ) is list else from_output # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state if "seer" in name and "in1k" in name: lowerCAmelCase__ : Optional[int] = our_outputs.hidden_states[-1] assert torch.allclose(_a , _a ), "The model logits don't match the original one." if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / name , commit_message='''Add model''' , use_temp_dir=_a , ) lowerCAmelCase__ : Optional[int] = 224 if '''seer''' not in name else 384 # we can use the convnext one lowerCAmelCase__ : int = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' , size=_a ) image_processor.push_to_hub( repo_path_or_name=save_directory / name , commit_message='''Add image processor''' , use_temp_dir=_a , ) print(f'Pushed {name}' ) def lowerCamelCase_ ( _a , _a = None , _a = True ): """simple docstring""" lowerCAmelCase__ : str = '''imagenet-1k-id2label.json''' lowerCAmelCase__ : Dict = 1_000 lowerCAmelCase__ : Optional[int] = (1, num_labels) lowerCAmelCase__ : Optional[int] = '''huggingface/label-files''' lowerCAmelCase__ : Tuple = num_labels lowerCAmelCase__ : List[Any] = json.load(open(cached_download(hf_hub_url(_a , _a , repo_type='''dataset''' ) ) , '''r''' ) ) lowerCAmelCase__ : Dict = {int(_a ): v for k, v in idalabel.items()} lowerCAmelCase__ : List[Any] = idalabel lowerCAmelCase__ : Union[str, Any] = {v: k for k, v in idalabel.items()} lowerCAmelCase__ : Dict = partial(_a , num_labels=_a , idalabel=_a , labelaid=_a ) lowerCAmelCase__ : Tuple = { '''regnet-x-002''': ImageNetPreTrainedConfig( depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='''x''' ), '''regnet-x-004''': ImageNetPreTrainedConfig( depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='''x''' ), '''regnet-x-006''': ImageNetPreTrainedConfig( depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='''x''' ), '''regnet-x-008''': ImageNetPreTrainedConfig( depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='''x''' ), '''regnet-x-016''': ImageNetPreTrainedConfig( depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='''x''' ), '''regnet-x-032''': ImageNetPreTrainedConfig( depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1_008] , groups_width=48 , layer_type='''x''' ), '''regnet-x-040''': ImageNetPreTrainedConfig( depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1_360] , groups_width=40 , layer_type='''x''' ), '''regnet-x-064''': ImageNetPreTrainedConfig( depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1_624] , groups_width=56 , layer_type='''x''' ), '''regnet-x-080''': ImageNetPreTrainedConfig( depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1_920] , groups_width=120 , layer_type='''x''' ), '''regnet-x-120''': ImageNetPreTrainedConfig( depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 , layer_type='''x''' ), '''regnet-x-160''': ImageNetPreTrainedConfig( depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2_048] , groups_width=128 , layer_type='''x''' ), '''regnet-x-320''': ImageNetPreTrainedConfig( depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1_344, 2_520] , groups_width=168 , layer_type='''x''' ), # y variant '''regnet-y-002''': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ), '''regnet-y-004''': ImageNetPreTrainedConfig( depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ), '''regnet-y-006''': ImageNetPreTrainedConfig( depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ), '''regnet-y-008''': ImageNetPreTrainedConfig( depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ), '''regnet-y-016''': ImageNetPreTrainedConfig( depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ), '''regnet-y-032''': ImageNetPreTrainedConfig( depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1_512] , groups_width=24 ), '''regnet-y-040''': ImageNetPreTrainedConfig( depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1_088] , groups_width=64 ), '''regnet-y-064''': ImageNetPreTrainedConfig( depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1_296] , groups_width=72 ), '''regnet-y-080''': ImageNetPreTrainedConfig( depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2_016] , groups_width=56 ), '''regnet-y-120''': ImageNetPreTrainedConfig( depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 ), '''regnet-y-160''': ImageNetPreTrainedConfig( depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1_232, 3_024] , groups_width=112 ), '''regnet-y-320''': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ), # models created by SEER -> https://arxiv.org/abs/2202.08360 '''regnet-y-320-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ), '''regnet-y-640-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328 ), '''regnet-y-1280-seer''': RegNetConfig( depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264 ), '''regnet-y-2560-seer''': RegNetConfig( depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640 ), '''regnet-y-10b-seer''': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010 ), # finetuned on imagenet '''regnet-y-320-seer-in1k''': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ), '''regnet-y-640-seer-in1k''': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328 ), '''regnet-y-1280-seer-in1k''': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264 ), '''regnet-y-2560-seer-in1k''': ImageNetPreTrainedConfig( depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640 ), '''regnet-y-10b-seer-in1k''': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010 ), } lowerCAmelCase__ : Optional[Any] = NameToOurModelFuncMap() lowerCAmelCase__ : Optional[Any] = NameToFromModelFuncMap() # add seer weights logic def load_using_classy_vision(_a , _a ) -> Tuple[nn.Module, Dict]: lowerCAmelCase__ : Tuple = torch.hub.load_state_dict_from_url(_a , model_dir=str(_a ) , map_location='''cpu''' ) lowerCAmelCase__ : int = model_func() # check if we have a head, if yes add it lowerCAmelCase__ : int = files['''classy_state_dict''']['''base_model''']['''model'''] lowerCAmelCase__ : Tuple = model_state_dict['''trunk'''] model.load_state_dict(_a ) return model.eval(), model_state_dict["heads"] # pretrained lowerCAmelCase__ : int = partial( _a , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) lowerCAmelCase__ : Optional[int] = partial( _a , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) lowerCAmelCase__ : Optional[int] = partial( _a , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , ) lowerCAmelCase__ : Tuple = partial( _a , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch''' , lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27 , group_width=1_010 , w_a=1_744 , w_a=6_20.83 , w_m=2.52 ) ) ) , ) # IN1K finetuned lowerCAmelCase__ : List[Any] = partial( _a , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) lowerCAmelCase__ : Optional[int] = partial( _a , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) lowerCAmelCase__ : Union[str, Any] = partial( _a , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , ) lowerCAmelCase__ : Union[str, Any] = partial( _a , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch''' , lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27 , group_width=1_010 , w_a=1_744 , w_a=6_20.83 , w_m=2.52 ) ) ) , ) if model_name: convert_weight_and_push( _a , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , _a , _a , ) else: for model_name, config in names_to_config.items(): convert_weight_and_push( _a , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , _a , _a , _a , ) return config, expected_shape if __name__ == "__main__": lowerCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default=None, type=str, help=( '''The name of the model you wish to convert, it must be one of the supported regnet* architecture,''' ''' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=Path, required=True, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=True, type=bool, required=False, help='''If True, push model and image processor to the hub.''', ) lowerCamelCase = parser.parse_args() lowerCamelCase = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
131
0
"""simple docstring""" from __future__ import annotations import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase_ ( __lowercase ): def UpperCamelCase_ ( self : List[str] ) -> Optional[Any]: _snake_case = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(A__ , '''embed_dim''' ) ) self.parent.assertTrue(hasattr(A__ , '''num_heads''' ) ) class lowercase_ : def __init__( self : List[Any] , A__ : List[str] , A__ : List[str]=13 , A__ : str=64 , A__ : Any=3 , A__ : List[Any]=[16, 48, 96] , A__ : Union[str, Any]=[1, 3, 6] , A__ : Optional[int]=[1, 2, 10] , A__ : str=[7, 3, 3] , A__ : Optional[Any]=[4, 2, 2] , A__ : List[Any]=[2, 1, 1] , A__ : Any=[2, 2, 2] , A__ : str=[False, False, True] , A__ : Optional[int]=[0.0, 0.0, 0.0] , A__ : List[Any]=0.02 , A__ : Optional[int]=1e-12 , A__ : int=True , A__ : int=True , A__ : Any=2 , ) -> Optional[Any]: _snake_case = parent _snake_case = batch_size _snake_case = image_size _snake_case = patch_sizes _snake_case = patch_stride _snake_case = patch_padding _snake_case = is_training _snake_case = use_labels _snake_case = num_labels _snake_case = num_channels _snake_case = embed_dim _snake_case = num_heads _snake_case = stride_kv _snake_case = depth _snake_case = cls_token _snake_case = attention_drop_rate _snake_case = initializer_range _snake_case = layer_norm_eps def UpperCamelCase_ ( self : str ) -> Union[str, Any]: _snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case = None if self.use_labels: # create a random int32 tensor of given shape _snake_case = ids_tensor([self.batch_size] , self.num_labels ) _snake_case = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self : Tuple ) -> Optional[int]: return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def UpperCamelCase_ ( self : List[Any] , A__ : int , A__ : Any , A__ : int ) -> Optional[Any]: _snake_case = TFCvtModel(config=A__ ) _snake_case = model(A__ , training=A__ ) _snake_case = (self.image_size, self.image_size) _snake_case, _snake_case = image_size[0], image_size[1] for i in range(len(self.depth ) ): _snake_case = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) _snake_case = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def UpperCamelCase_ ( self : Tuple , A__ : int , A__ : Dict , A__ : Optional[Any] ) -> Optional[Any]: _snake_case = self.num_labels _snake_case = TFCvtForImageClassification(A__ ) _snake_case = model(A__ , labels=A__ , training=A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self : Any ) -> str: _snake_case = self.prepare_config_and_inputs() _snake_case, _snake_case, _snake_case = config_and_inputs _snake_case = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class lowercase_ ( __lowercase , __lowercase , unittest.TestCase ): UpperCamelCase_ : List[str] = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () UpperCamelCase_ : int = ( {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification} if is_tf_available() else {} ) UpperCamelCase_ : List[Any] = False UpperCamelCase_ : List[str] = False UpperCamelCase_ : Tuple = False UpperCamelCase_ : Any = False UpperCamelCase_ : Dict = False def UpperCamelCase_ ( self : Optional[Any] ) -> Optional[int]: _snake_case = TFCvtModelTester(self ) _snake_case = TFCvtConfigTester(self , config_class=A__ , has_text_modality=A__ , hidden_size=37 ) def UpperCamelCase_ ( self : int ) -> str: self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason='''Cvt does not output attentions''' ) def UpperCamelCase_ ( self : List[str] ) -> Optional[Any]: pass @unittest.skip(reason='''Cvt does not use inputs_embeds''' ) def UpperCamelCase_ ( self : Tuple ) -> Optional[int]: pass @unittest.skip(reason='''Cvt does not support input and output embeddings''' ) def UpperCamelCase_ ( self : Union[str, Any] ) -> Optional[int]: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , ) def UpperCamelCase_ ( self : Any ) -> List[str]: super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , ) @slow def UpperCamelCase_ ( self : str ) -> Dict: super().test_keras_fit() @unittest.skip(reason='''Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8''' ) def UpperCamelCase_ ( self : List[Any] ) -> Tuple: _snake_case = tf.keras.mixed_precision.Policy('''mixed_float16''' ) tf.keras.mixed_precision.set_global_policy(A__ ) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy('''float32''' ) def UpperCamelCase_ ( self : Optional[Any] ) -> int: _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(A__ ) _snake_case = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case = [*signature.parameters.keys()] _snake_case = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , A__ ) def UpperCamelCase_ ( self : Optional[int] ) -> Union[str, Any]: def check_hidden_states_output(A__ : Union[str, Any] , A__ : List[Any] , A__ : Optional[Any] ): _snake_case = model_class(A__ ) _snake_case = model(**self._prepare_for_class(A__ , A__ ) ) _snake_case = outputs.hidden_states _snake_case = len(self.model_tester.depth ) self.assertEqual(len(A__ ) , A__ ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = True check_hidden_states_output(A__ , A__ , A__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case = True check_hidden_states_output(A__ , A__ , A__ ) def UpperCamelCase_ ( self : Optional[int] ) -> Any: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A__ ) def UpperCamelCase_ ( self : int ) -> List[str]: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A__ ) @slow def UpperCamelCase_ ( self : List[Any] ) -> Optional[int]: for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case = TFCvtModel.from_pretrained(A__ ) self.assertIsNotNone(A__ ) def snake_case_() -> List[str]: """simple docstring""" _snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class lowercase_ ( unittest.TestCase ): @cached_property def UpperCamelCase_ ( self : Tuple ) -> Tuple: return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def UpperCamelCase_ ( self : Dict ) -> Optional[Any]: _snake_case = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = image_processor(images=A__ , return_tensors='''tf''' ) # forward pass _snake_case = model(**A__ ) # verify the logits _snake_case = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , A__ ) _snake_case = tf.constant([0.9285, 0.9015, -0.3150] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , A__ , atol=1e-4 ) )
368
def snake_case_(_UpperCamelCase , _UpperCamelCase ) -> str: """simple docstring""" if a < 0 or b < 0: raise ValueError('''the value of both inputs must be positive''' ) _snake_case = str(bin(_UpperCamelCase ) )[2:] # remove the leading "0b" _snake_case = str(bin(_UpperCamelCase ) )[2:] _snake_case = max(len(_UpperCamelCase ) , len(_UpperCamelCase ) ) return "0b" + "".join( str(int('''1''' in (char_a, char_b) ) ) for char_a, char_b in zip(a_binary.zfill(_UpperCamelCase ) , b_binary.zfill(_UpperCamelCase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
278
0
"""simple docstring""" def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str ): '''simple docstring''' lowerCAmelCase = 0 for ch in input_str: lowerCAmelCase = ord(SCREAMING_SNAKE_CASE ) lowerCAmelCase = pow(2 , SCREAMING_SNAKE_CASE ) # If we already turned on bit for current character's unicode if bitmap >> ch_unicode & 1 == 1: return False bitmap |= ch_bit_index_on return True if __name__ == "__main__": import doctest doctest.testmod()
46
from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __A ( lowerCAmelCase ): '''simple docstring''' lowerCAmelCase_ = ["""image_processor""", """tokenizer"""] lowerCAmelCase_ = """BlipImageProcessor""" lowerCAmelCase_ = ("""BertTokenizer""", """BertTokenizerFast""") def __init__( self , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = False super().__init__(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = self.image_processor def __call__( self , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = True , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = 0 , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = True , __lowerCAmelCase = None , **__lowerCAmelCase , ): '''simple docstring''' if images is None and text is None: raise ValueError('''You have to specify either images or text.''' ) # Get only text if images is None: lowerCamelCase__ = self.tokenizer lowerCamelCase__ = self.tokenizer( text=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , stride=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_overflowing_tokens=__lowerCAmelCase , return_special_tokens_mask=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_length=__lowerCAmelCase , verbose=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase , ) return text_encoding # add pixel_values lowerCamelCase__ = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase ) if text is not None: lowerCamelCase__ = self.tokenizer( text=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , stride=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_overflowing_tokens=__lowerCAmelCase , return_special_tokens_mask=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_length=__lowerCAmelCase , verbose=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase , ) else: lowerCamelCase__ = None if text_encoding is not None: encoding_image_processor.update(__lowerCAmelCase ) return encoding_image_processor def __lowerCamelCase ( self , *__lowerCAmelCase , **__lowerCAmelCase ): '''simple docstring''' return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase ) def __lowerCamelCase ( self , *__lowerCAmelCase , **__lowerCAmelCase ): '''simple docstring''' return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase ) @property def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.tokenizer.model_input_names lowerCamelCase__ = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
209
0
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer lowerCamelCase_ : List[Any] = logging.get_logger(__name__) lowerCamelCase_ : int = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} lowerCamelCase_ : Optional[Any] = { """vocab_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } lowerCamelCase_ : Dict = { """vocab_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } lowerCamelCase_ : str = { """vocab_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json""" ), }, } lowerCamelCase_ : Any = { """facebook/dpr-ctx_encoder-single-nq-base""": 5_12, """facebook/dpr-ctx_encoder-multiset-base""": 5_12, } lowerCamelCase_ : str = { """facebook/dpr-question_encoder-single-nq-base""": 5_12, """facebook/dpr-question_encoder-multiset-base""": 5_12, } lowerCamelCase_ : Tuple = { """facebook/dpr-reader-single-nq-base""": 5_12, """facebook/dpr-reader-multiset-base""": 5_12, } lowerCamelCase_ : Optional[Any] = { """facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True}, } lowerCamelCase_ : Tuple = { """facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True}, } lowerCamelCase_ : Any = { """facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True}, } class _UpperCamelCase ( _A ): '''simple docstring''' __UpperCamelCase : int = VOCAB_FILES_NAMES __UpperCamelCase : str = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : str = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : Any = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION __UpperCamelCase : Union[str, Any] = DPRContextEncoderTokenizer class _UpperCamelCase ( _A ): '''simple docstring''' __UpperCamelCase : str = VOCAB_FILES_NAMES __UpperCamelCase : List[str] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : List[str] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : Dict = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION __UpperCamelCase : Tuple = DPRQuestionEncoderTokenizer lowerCamelCase_ : Optional[Any] = collections.namedtuple( """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""] ) lowerCamelCase_ : Any = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""]) lowerCamelCase_ : Dict = r""" Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. """ @add_start_docstrings(_A ) class _UpperCamelCase : '''simple docstring''' def __call__( self : Optional[Any] , snake_case_ : int , snake_case_ : Optional[str] = None , snake_case_ : Optional[str] = None , snake_case_ : Union[bool, str] = False , snake_case_ : Union[bool, str] = False , snake_case_ : Optional[int] = None , snake_case_ : Optional[Union[str, TensorType]] = None , snake_case_ : Optional[bool] = None , **snake_case_ : int , ): if titles is None and texts is None: return super().__call__( snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , return_tensors=snake_case_ , return_attention_mask=snake_case_ , **snake_case_ , ) elif titles is None or texts is None: UpperCamelCase_: Dict = titles if texts is None else texts return super().__call__( snake_case_ , snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , return_tensors=snake_case_ , return_attention_mask=snake_case_ , **snake_case_ , ) UpperCamelCase_: Union[str, Any] = titles if not isinstance(snake_case_ , snake_case_ ) else [titles] UpperCamelCase_: List[str] = texts if not isinstance(snake_case_ , snake_case_ ) else [texts] UpperCamelCase_: List[Any] = len(snake_case_ ) UpperCamelCase_: Optional[int] = questions if not isinstance(snake_case_ , snake_case_ ) else [questions] * n_passages assert len(snake_case_ ) == len( snake_case_ ), f'''There should be as many titles than texts but got {len(snake_case_ )} titles and {len(snake_case_ )} texts.''' UpperCamelCase_: Union[str, Any] = super().__call__(snake_case_ , snake_case_ , padding=snake_case_ , truncation=snake_case_ )["""input_ids"""] UpperCamelCase_: List[Any] = super().__call__(snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ )["""input_ids"""] UpperCamelCase_: Tuple = { """input_ids""": [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(snake_case_ , snake_case_ ) ] } if return_attention_mask is not False: UpperCamelCase_: Optional[int] = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) UpperCamelCase_: Tuple = attention_mask return self.pad(snake_case_ , padding=snake_case_ , max_length=snake_case_ , return_tensors=snake_case_ ) def lowerCAmelCase__ ( self : Any , snake_case_ : BatchEncoding , snake_case_ : DPRReaderOutput , snake_case_ : int = 16 , snake_case_ : int = 64 , snake_case_ : int = 4 , ): UpperCamelCase_: Optional[Any] = reader_input["""input_ids"""] UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Any = reader_output[:3] UpperCamelCase_: Dict = len(snake_case_ ) UpperCamelCase_: Optional[int] = sorted(range(snake_case_ ) , reverse=snake_case_ , key=relevance_logits.__getitem__ ) UpperCamelCase_: List[DPRReaderOutput] = [] for doc_id in sorted_docs: UpperCamelCase_: List[str] = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence UpperCamelCase_: int = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: UpperCamelCase_: int = sequence_ids.index(self.pad_token_id ) else: UpperCamelCase_: List[str] = len(snake_case_ ) UpperCamelCase_: Tuple = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=snake_case_ , top_spans=snake_case_ , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=snake_case_ , start_index=snake_case_ , end_index=snake_case_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(snake_case_ ) >= num_spans: break return nbest_spans_predictions[:num_spans] def lowerCAmelCase__ ( self : Tuple , snake_case_ : List[int] , snake_case_ : List[int] , snake_case_ : int , snake_case_ : int , ): UpperCamelCase_: Dict = [] for start_index, start_score in enumerate(snake_case_ ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) UpperCamelCase_: List[str] = sorted(snake_case_ , key=lambda snake_case_ : x[1] , reverse=snake_case_ ) UpperCamelCase_: Optional[Any] = [] for (start_index, end_index), score in scores: assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]''' UpperCamelCase_: List[Any] = end_index - start_index + 1 assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}''' if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(snake_case_ ) == top_spans: break return chosen_span_intervals @add_end_docstrings(_A ) class _UpperCamelCase ( _A , _A ): '''simple docstring''' __UpperCamelCase : Optional[int] = VOCAB_FILES_NAMES __UpperCamelCase : Tuple = READER_PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : Dict = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : Optional[int] = READER_PRETRAINED_INIT_CONFIGURATION __UpperCamelCase : str = ["""input_ids""", """attention_mask"""] __UpperCamelCase : int = DPRReaderTokenizer
223
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase_ : Optional[int] = logging.get_logger(__name__) lowerCamelCase_ : Dict = { """studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""", """studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""", } class _UpperCamelCase ( _A ): '''simple docstring''' __UpperCamelCase : Optional[int] = """luke""" def __init__( self : Tuple , snake_case_ : List[Any]=5_0267 , snake_case_ : Any=50_0000 , snake_case_ : str=768 , snake_case_ : int=256 , snake_case_ : str=12 , snake_case_ : int=12 , snake_case_ : Dict=3072 , snake_case_ : Optional[Any]="gelu" , snake_case_ : Dict=0.1 , snake_case_ : List[str]=0.1 , snake_case_ : int=512 , snake_case_ : Dict=2 , snake_case_ : List[Any]=0.02 , snake_case_ : int=1e-12 , snake_case_ : Union[str, Any]=True , snake_case_ : Union[str, Any]=None , snake_case_ : Dict=1 , snake_case_ : Optional[int]=0 , snake_case_ : List[str]=2 , **snake_case_ : Union[str, Any] , ): super().__init__(pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ ) UpperCamelCase_: Dict = vocab_size UpperCamelCase_: Tuple = entity_vocab_size UpperCamelCase_: Optional[int] = hidden_size UpperCamelCase_: Any = entity_emb_size UpperCamelCase_: str = num_hidden_layers UpperCamelCase_: Union[str, Any] = num_attention_heads UpperCamelCase_: Dict = hidden_act UpperCamelCase_: Dict = intermediate_size UpperCamelCase_: str = hidden_dropout_prob UpperCamelCase_: List[str] = attention_probs_dropout_prob UpperCamelCase_: int = max_position_embeddings UpperCamelCase_: int = type_vocab_size UpperCamelCase_: List[Any] = initializer_range UpperCamelCase_: Union[str, Any] = layer_norm_eps UpperCamelCase_: Tuple = use_entity_aware_attention UpperCamelCase_: int = classifier_dropout
223
1
'''simple docstring''' import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger __a: List[str] = get_logger(__name__) __a: Union[str, Any] = R""" Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs (`Dict[str, Any]`, *optional*): Additional logits processor specific kwargs. Return: `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores. """ class UpperCAmelCase : '''simple docstring''' @add_start_docstrings(__lowerCAmelCase ) def __call__( self , __lowerCAmelCase , __lowerCAmelCase ) -> jnp.ndarray: raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) class UpperCAmelCase : '''simple docstring''' @add_start_docstrings(__lowerCAmelCase ) def __call__( self , __lowerCAmelCase , __lowerCAmelCase ) -> jnp.ndarray: raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) class UpperCAmelCase ( a__ ): '''simple docstring''' @add_start_docstrings(__lowerCAmelCase ) def __call__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) -> jnp.ndarray: for processor in self: lowercase__ : List[str] = inspect.signature(processor.__call__ ).parameters if len(__lowerCAmelCase ) > 3: if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ): raise ValueError( F"""Make sure that all the required parameters: {list(function_args.keys() )} for """ F"""{processor.__class__} are passed to the logits processor.""" ) lowercase__ : Union[str, Any] = processor(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) else: lowercase__ : List[str] = processor(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return scores class UpperCAmelCase ( a__ ): '''simple docstring''' def __init__( self , __lowerCAmelCase ) -> List[str]: if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not (temperature > 0): raise ValueError(F"""`temperature` has to be a strictly positive float, but is {temperature}""" ) lowercase__ : Optional[Any] = temperature def __call__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> jnp.ndarray: lowercase__ : Tuple = scores / self.temperature return scores class UpperCAmelCase ( a__ ): '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase = -float('''Inf''' ) , __lowerCAmelCase = 1 ) -> Any: if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or (top_p < 0 or top_p > 1.0): raise ValueError(F"""`top_p` has to be a float > 0 and < 1, but is {top_p}""" ) if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or (min_tokens_to_keep < 1): raise ValueError(F"""`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}""" ) lowercase__ : Any = top_p lowercase__ : Optional[int] = filter_value lowercase__ : Optional[Any] = min_tokens_to_keep def __call__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> jnp.ndarray: lowercase__ , lowercase__ : Tuple = lax.top_k(__lowerCAmelCase , scores.shape[-1] ) lowercase__ : Optional[int] = jnp.full_like(__lowerCAmelCase , self.filter_value ) lowercase__ : Dict = jax.nn.softmax(__lowerCAmelCase , axis=-1 ).cumsum(axis=-1 ) lowercase__ : Any = cumulative_probs < self.top_p # include the token that is higher than top_p as well lowercase__ : Dict = jnp.roll(__lowerCAmelCase , 1 ) score_mask |= score_mask.at[:, 0].set(__lowerCAmelCase ) # min tokens to keep lowercase__ : int = score_mask.at[:, : self.min_tokens_to_keep].set(__lowerCAmelCase ) lowercase__ : List[str] = jnp.where(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) lowercase__ : Optional[int] = jax.lax.sort_key_val(__lowerCAmelCase , __lowerCAmelCase )[-1] return next_scores class UpperCAmelCase ( a__ ): '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase = -float('''Inf''' ) , __lowerCAmelCase = 1 ) -> Optional[int]: if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or top_k <= 0: raise ValueError(F"""`top_k` has to be a strictly positive integer, but is {top_k}""" ) lowercase__ : Any = max(__lowerCAmelCase , __lowerCAmelCase ) lowercase__ : Dict = filter_value def __call__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> jnp.ndarray: lowercase__ , lowercase__ : Optional[Any] = scores.shape lowercase__ : Any = jnp.full(batch_size * vocab_size , self.filter_value ) lowercase__ : List[str] = min(self.top_k , scores.shape[-1] ) # Safety check lowercase__ , lowercase__ : Any = lax.top_k(__lowerCAmelCase , __lowerCAmelCase ) lowercase__ : Optional[int] = jnp.broadcast_to((jnp.arange(__lowerCAmelCase ) * vocab_size)[:, None] , (batch_size, topk) ).flatten() lowercase__ : Optional[int] = topk_scores.flatten() lowercase__ : Any = topk_indices.flatten() + shift lowercase__ : List[Any] = next_scores_flat.at[topk_indices_flat].set(__lowerCAmelCase ) lowercase__ : Union[str, Any] = next_scores_flat.reshape(__lowerCAmelCase , __lowerCAmelCase ) return next_scores class UpperCAmelCase ( a__ ): '''simple docstring''' def __init__( self , __lowerCAmelCase ) -> Union[str, Any]: lowercase__ : List[Any] = bos_token_id def __call__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> jnp.ndarray: lowercase__ : Union[str, Any] = jnp.full(scores.shape , -float('''inf''' ) ) lowercase__ : str = 1 - jnp.bool_(cur_len - 1 ) lowercase__ : Tuple = jnp.where(__lowerCAmelCase , new_scores.at[:, self.bos_token_id].set(0 ) , __lowerCAmelCase ) return scores class UpperCAmelCase ( a__ ): '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]: lowercase__ : int = max_length lowercase__ : Tuple = eos_token_id def __call__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> jnp.ndarray: lowercase__ : int = jnp.full(scores.shape , -float('''inf''' ) ) lowercase__ : Optional[Any] = 1 - jnp.bool_(cur_len - self.max_length + 1 ) lowercase__ : Optional[int] = jnp.where(__lowerCAmelCase , new_scores.at[:, self.eos_token_id].set(0 ) , __lowerCAmelCase ) return scores class UpperCAmelCase ( a__ ): '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]: if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or min_length < 0: raise ValueError(F"""`min_length` has to be a positive integer, but is {min_length}""" ) if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or eos_token_id < 0: raise ValueError(F"""`eos_token_id` has to be a positive integer, but is {eos_token_id}""" ) lowercase__ : Optional[int] = min_length lowercase__ : Any = eos_token_id def __call__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> jnp.ndarray: # create boolean flag to decide if min length penalty should be applied lowercase__ : Any = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) lowercase__ : Any = jnp.where(__lowerCAmelCase , scores.at[:, self.eos_token_id].set(-float('''inf''' ) ) , __lowerCAmelCase ) return scores class UpperCAmelCase ( a__ ): '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple: lowercase__ : Optional[int] = list(__lowerCAmelCase ) lowercase__ : Tuple = begin_index def __call__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple: lowercase__ : Union[str, Any] = 1 - jnp.bool_(cur_len - self.begin_index ) lowercase__ : List[Any] = jnp.where(__lowerCAmelCase , scores.at[:, self.begin_suppress_tokens].set(-float('''inf''' ) ) , __lowerCAmelCase ) return scores class UpperCAmelCase ( a__ ): '''simple docstring''' def __init__( self , __lowerCAmelCase ) -> List[Any]: lowercase__ : Optional[int] = list(__lowerCAmelCase ) def __call__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> jnp.ndarray: lowercase__ : Union[str, Any] = scores.at[..., self.suppress_tokens].set(-float('''inf''' ) ) return scores class UpperCAmelCase ( a__ ): '''simple docstring''' def __init__( self , __lowerCAmelCase ) -> Any: lowercase__ : str = dict(__lowerCAmelCase ) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. lowercase__ : Optional[int] = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: lowercase__ : Optional[int] = force_token_array.at[index].set(__lowerCAmelCase ) lowercase__ : Tuple = jnp.intaa(__lowerCAmelCase ) def __call__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> jnp.ndarray: def _force_token(__lowerCAmelCase ): lowercase__ : str = scores.shape[0] lowercase__ : Optional[int] = self.force_token_array[generation_idx] lowercase__ : Optional[Any] = jnp.ones_like(__lowerCAmelCase , dtype=scores.dtype ) * -float('''inf''' ) lowercase__ : List[Any] = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) lowercase__ : List[str] = lax.dynamic_update_slice(__lowerCAmelCase , __lowerCAmelCase , (0, current_token) ) return new_scores lowercase__ : Optional[Any] = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(__lowerCAmelCase ) , lambda: scores , ) , ) return scores class UpperCAmelCase ( a__ ): '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any: lowercase__ : List[Any] = generate_config.eos_token_id lowercase__ : Dict = generate_config.no_timestamps_token_id lowercase__ : Any = generate_config.no_timestamps_token_id + 1 lowercase__ : Union[str, Any] = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(__lowerCAmelCase , '''max_initial_timestamp_index''' ): lowercase__ : str = generate_config.max_initial_timestamp_index else: lowercase__ : int = model_config.vocab_size if self.max_initial_timestamp_index is None: lowercase__ : List[str] = model_config.vocab_size def __call__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]: # suppress <|notimestamps|> which is handled by without_timestamps lowercase__ : Optional[Any] = scores.at[:, self.no_timestamps_token_id].set(-float('''inf''' ) ) def handle_pairs(__lowerCAmelCase , __lowerCAmelCase ): lowercase__ : Dict = jnp.where((cur_len - self.begin_index) >= 1 , __lowerCAmelCase , __lowerCAmelCase ) lowercase__ : List[str] = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , __lowerCAmelCase , ) lowercase__ : List[str] = jnp.where((cur_len - self.begin_index) < 2 , __lowerCAmelCase , __lowerCAmelCase ) lowercase__ : Union[str, Any] = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , __lowerCAmelCase , __lowerCAmelCase , ) return jnp.where( __lowerCAmelCase , jnp.where( penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('''inf''' ) ) , scores_k.at[: self.eos_token_id].set(-float('''inf''' ) ) , ) , __lowerCAmelCase , ) lowercase__ : Tuple = jax.vmap(__lowerCAmelCase )(__lowerCAmelCase , __lowerCAmelCase ) lowercase__ : Union[str, Any] = jnp.where(cur_len == self.begin_index , __lowerCAmelCase , __lowerCAmelCase ) lowercase__ : Union[str, Any] = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , __lowerCAmelCase , ) lowercase__ : Dict = self.timestamp_begin + self.max_initial_timestamp_index lowercase__ : Union[str, Any] = jnp.where( __lowerCAmelCase , scores.at[:, last_allowed + 1 :].set(-float('''inf''' ) ) , __lowerCAmelCase , ) # if sum of probability over timestamps is above any other token, sample timestamp lowercase__ : Any = jax.nn.log_softmax(__lowerCAmelCase , axis=-1 ) def handle_cumulative_probs(__lowerCAmelCase , __lowerCAmelCase ): lowercase__ : Dict = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) lowercase__ : Optional[int] = jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('''inf''' ) ) , __lowerCAmelCase , ) lowercase__ : Tuple = jax.vmap(__lowerCAmelCase )(__lowerCAmelCase , __lowerCAmelCase ) return scores
198
'''simple docstring''' import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format="""%(message)s""") def __UpperCamelCase ( UpperCAmelCase ): return input_array.reshape((input_array.size, 1) ) def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): lowercase__ : Dict = np.nan for i in range(UpperCAmelCase ): lowercase__ : Optional[Any] = features[:, labels == i] lowercase__ : Optional[Any] = data.mean(1 ) # Centralize the data of class i lowercase__ : Dict = data - column_reshape(UpperCAmelCase ) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(UpperCAmelCase , centered_data.T ) else: # If covariance_sum is np.nan (i.e. first loop) lowercase__ : List[str] = np.dot(UpperCAmelCase , centered_data.T ) return covariance_sum / features.shape[1] def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): lowercase__ : Tuple = features.mean(1 ) lowercase__ : Dict = np.nan for i in range(UpperCAmelCase ): lowercase__ : List[str] = features[:, labels == i] lowercase__ : int = data.shape[1] lowercase__ : Optional[int] = data.mean(1 ) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(UpperCAmelCase ) - column_reshape(UpperCAmelCase ) , (column_reshape(UpperCAmelCase ) - column_reshape(UpperCAmelCase )).T , ) else: # If covariance_sum is np.nan (i.e. first loop) lowercase__ : Optional[int] = device_data * np.dot( column_reshape(UpperCAmelCase ) - column_reshape(UpperCAmelCase ) , (column_reshape(UpperCAmelCase ) - column_reshape(UpperCAmelCase )).T , ) return covariance_sum / features.shape[1] def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ): # Check if the features have been loaded if features.any(): lowercase__ : Optional[Any] = features.mean(1 ) # Center the dataset lowercase__ : List[str] = features - np.reshape(UpperCAmelCase , (data_mean.size, 1) ) lowercase__ : Optional[Any] = np.dot(UpperCAmelCase , centered_data.T ) / features.shape[1] lowercase__ , lowercase__ : Tuple = np.linalg.eigh(UpperCAmelCase ) # Take all the columns in the reverse order (-1), and then takes only the first lowercase__ : str = eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space lowercase__ : Tuple = np.dot(filtered_eigenvectors.T , UpperCAmelCase ) logging.info('''Principal Component Analysis computed''' ) return projected_data else: logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=UpperCAmelCase ) logging.error('''Dataset empty''' ) raise AssertionError def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): assert classes > dimensions # Check if features have been already loaded if features.any: lowercase__ , lowercase__ : Any = eigh( covariance_between_classes(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , covariance_within_classes(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , ) lowercase__ : Optional[int] = eigenvectors[:, ::-1][:, :dimensions] lowercase__ , lowercase__ , lowercase__ : Optional[int] = np.linalg.svd(UpperCAmelCase ) lowercase__ : List[str] = svd_matrix[:, 0:dimensions] lowercase__ : str = np.dot(filtered_svd_matrix.T , UpperCAmelCase ) logging.info('''Linear Discriminant Analysis computed''' ) return projected_data else: logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=UpperCAmelCase ) logging.error('''Dataset empty''' ) raise AssertionError def __UpperCamelCase ( ): # Create dummy dataset with 2 classes and 3 features lowercase__ : List[str] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] ) lowercase__ : Optional[Any] = np.array([0, 0, 0, 1, 1] ) lowercase__ : str = 2 lowercase__ : Dict = 2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(UpperCAmelCase ) as error_info: lowercase__ : int = linear_discriminant_analysis( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) if isinstance(UpperCAmelCase , np.ndarray ): raise AssertionError( '''Did not raise AssertionError for dimensions > classes''' ) assert error_info.type is AssertionError def __UpperCamelCase ( ): lowercase__ : Optional[int] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ) lowercase__ : int = 2 lowercase__ : Any = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] ) with pytest.raises(UpperCAmelCase ) as error_info: lowercase__ : Dict = principal_component_analysis(UpperCAmelCase , UpperCAmelCase ) if not np.allclose(UpperCAmelCase , UpperCAmelCase ): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
198
1
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ..models.auto import AutoModelForVisionaSeq from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class __A ( a ): __A = """Salesforce/blip-image-captioning-base""" __A = ( """This is a tool that generates a description of an image. It takes an input named `image` which should be the """ """image to caption, and returns a text that contains the description in English.""" ) __A = """image_captioner""" __A = AutoModelForVisionaSeq __A = ["""image"""] __A = ["""text"""] def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ): requires_backends(self , ["""vision"""] ) super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ ) def _snake_case ( self , UpperCAmelCase_ ): return self.pre_processor(images=UpperCAmelCase_ , return_tensors="""pt""" ) def _snake_case ( self , UpperCAmelCase_ ): return self.model.generate(**UpperCAmelCase_ ) def _snake_case ( self , UpperCAmelCase_ ): return self.pre_processor.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )[0].strip()
262
import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def _lowercase ( ) -> str: with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(_UpperCAmelCase ): requests.request("""GET""" , """https://huggingface.co""" ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 ) @pytest.mark.integration def _lowercase ( ) -> Union[str, Any]: with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request("""GET""" , """https://huggingface.co""" ) def _lowercase ( ) -> int: with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(_UpperCAmelCase ): http_head("""https://huggingface.co""" )
262
1
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_download, hf_hub_url from PIL import Image from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE_: List[str] =logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case_ : List[Any] ) -> Optional[int]: '''simple docstring''' UpperCAmelCase_ = SwinConfig( embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=["stage2", "stage3", "stage4"] , ) UpperCAmelCase_ = DetaConfig( backbone_config=snake_case_ , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=snake_case_ , with_box_refine=snake_case_ , two_stage=snake_case_ , ) # set labels UpperCAmelCase_ = "huggingface/label-files" if "o365" in model_name: UpperCAmelCase_ = 3_66 UpperCAmelCase_ = "object365-id2label.json" else: UpperCAmelCase_ = 91 UpperCAmelCase_ = "coco-detection-id2label.json" UpperCAmelCase_ = num_labels UpperCAmelCase_ = json.load(open(cached_download(hf_hub_url(snake_case_ , snake_case_ , repo_type="dataset" ) ) , "r" ) ) UpperCAmelCase_ = {int(snake_case_ ): v for k, v in idalabel.items()} UpperCAmelCase_ = idalabel UpperCAmelCase_ = {v: k for k, v in idalabel.items()} return config def lowerCAmelCase_ ( snake_case_ : int ) -> Optional[int]: '''simple docstring''' UpperCAmelCase_ = [] # stem # fmt: off rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") ) rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") ) rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") ) rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") ) if i < 3: rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.reduction.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") ) rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.bias""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") ) rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") ) rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") ) rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") ) rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") ) rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") ) rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") ) # transformer encoder for i in range(config.encoder_layers ): rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", f"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", f"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", f"""model.encoder.layers.{i}.self_attn.value_proj.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", f"""model.encoder.layers.{i}.self_attn.value_proj.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", f"""model.encoder.layers.{i}.self_attn.output_proj.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", f"""model.encoder.layers.{i}.self_attn.output_proj.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.weight""", f"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""model.encoder.layers.{i}.fc1.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""model.encoder.layers.{i}.fc1.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""model.encoder.layers.{i}.fc2.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""model.encoder.layers.{i}.fc2.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""model.encoder.layers.{i}.final_layer_norm.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""model.encoder.layers.{i}.final_layer_norm.bias""") ) # transformer decoder for i in range(config.decoder_layers ): rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.weight""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""model.decoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""model.decoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.weight""", f"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.bias""", f"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""model.decoder.layers.{i}.fc1.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""model.decoder.layers.{i}.fc1.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""model.decoder.layers.{i}.fc2.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""model.decoder.layers.{i}.fc2.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""model.decoder.layers.{i}.final_layer_norm.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""model.decoder.layers.{i}.final_layer_norm.bias""") ) # fmt: on return rename_keys def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : int , snake_case_ : Optional[int] ) -> int: '''simple docstring''' UpperCAmelCase_ = dct.pop(snake_case_ ) UpperCAmelCase_ = val def lowerCAmelCase_ ( snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): UpperCAmelCase_ = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) UpperCAmelCase_ = state_dict.pop(f"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight""" ) UpperCAmelCase_ = state_dict.pop(f"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ = in_proj_weight[:dim, :] UpperCAmelCase_ = in_proj_bias[: dim] UpperCAmelCase_ = in_proj_weight[ dim : dim * 2, : ] UpperCAmelCase_ = in_proj_bias[ dim : dim * 2 ] UpperCAmelCase_ = in_proj_weight[ -dim :, : ] UpperCAmelCase_ = in_proj_bias[-dim :] # fmt: on def lowerCAmelCase_ ( snake_case_ : Union[str, Any] , snake_case_ : Dict ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = config.d_model for i in range(config.decoder_layers ): # read in weights + bias of input projection layer of self-attention UpperCAmelCase_ = state_dict.pop(f"""transformer.decoder.layers.{i}.self_attn.in_proj_weight""" ) UpperCAmelCase_ = state_dict.pop(f"""transformer.decoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ = in_proj_weight[:hidden_size, :] UpperCAmelCase_ = in_proj_bias[:hidden_size] UpperCAmelCase_ = in_proj_weight[ hidden_size : hidden_size * 2, : ] UpperCAmelCase_ = in_proj_bias[hidden_size : hidden_size * 2] UpperCAmelCase_ = in_proj_weight[-hidden_size:, :] UpperCAmelCase_ = in_proj_bias[-hidden_size:] def lowerCAmelCase_ ( ) -> Dict: '''simple docstring''' UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : str , snake_case_ : str ) -> int: '''simple docstring''' UpperCAmelCase_ = get_deta_config(snake_case_ ) # load original state dict if model_name == "deta-swin-large": UpperCAmelCase_ = hf_hub_download(repo_id="nielsr/deta-checkpoints" , filename="adet_swin_ft.pth" ) elif model_name == "deta-swin-large-o365": UpperCAmelCase_ = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365" , filename="deta_swin_pt_o365.pth" ) else: raise ValueError(f"""Model name {model_name} not supported""" ) UpperCAmelCase_ = torch.load(snake_case_ , map_location="cpu" )["model"] # original state dict for name, param in state_dict.items(): print(snake_case_ , param.shape ) # rename keys UpperCAmelCase_ = create_rename_keys(snake_case_ ) for src, dest in rename_keys: rename_key(snake_case_ , snake_case_ , snake_case_ ) read_in_swin_q_k_v(snake_case_ , config.backbone_config ) read_in_decoder_q_k_v(snake_case_ , snake_case_ ) # fix some prefixes for key in state_dict.copy().keys(): if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key: UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val if "input_proj" in key: UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key: UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val # finally, create HuggingFace model and load state dict UpperCAmelCase_ = DetaForObjectDetection(snake_case_ ) model.load_state_dict(snake_case_ ) model.eval() UpperCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu" model.to(snake_case_ ) # load image processor UpperCAmelCase_ = DetaImageProcessor(format="coco_detection" ) # verify our conversion on image UpperCAmelCase_ = prepare_img() UpperCAmelCase_ = processor(images=snake_case_ , return_tensors="pt" ) UpperCAmelCase_ = encoding["pixel_values"] UpperCAmelCase_ = model(pixel_values.to(snake_case_ ) ) # verify logits print("Logits:" , outputs.logits[0, :3, :3] ) print("Boxes:" , outputs.pred_boxes[0, :3, :3] ) if model_name == "deta-swin-large": UpperCAmelCase_ = torch.tensor( [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] ) UpperCAmelCase_ = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] ) elif model_name == "deta-swin-large-o365": UpperCAmelCase_ = torch.tensor( [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] ) UpperCAmelCase_ = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] ) assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(snake_case_ ) , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(snake_case_ ) , atol=1E-4 ) print("Everything ok!" ) if pytorch_dump_folder_path: # Save model and processor logger.info(f"""Saving PyTorch model and processor to {pytorch_dump_folder_path}...""" ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) model.save_pretrained(snake_case_ ) processor.save_pretrained(snake_case_ ) # Push to hub if push_to_hub: print("Pushing model and processor to hub..." ) model.push_to_hub(f"""jozhang97/{model_name}""" ) processor.push_to_hub(f"""jozhang97/{model_name}""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_: Dict =argparse.ArgumentParser() parser.add_argument( '--model_name', type=str, default='deta-swin-large', choices=['deta-swin-large', 'deta-swin-large-o365'], help='Name of the model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) SCREAMING_SNAKE_CASE_: Optional[int] =parser.parse_args() convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
1
'''simple docstring''' import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) SCREAMING_SNAKE_CASE_: str =logging.getLogger(__name__) @dataclass(frozen=UpperCamelCase__ ) class __A : a__ : str a__ : str a__ : Optional[str] = None a__ : Optional[str] = None a__ : Optional[str] = None @dataclass(frozen=UpperCamelCase__ ) class __A : a__ : List[int] a__ : Optional[List[int]] = None a__ : Optional[List[int]] = None a__ : Optional[Union[int, float]] = None a__ : Optional[int] = None if is_torch_available(): import torch from torch.utils.data import Dataset class __A ( UpperCamelCase__ ): a__ : List[InputFeatures] def __init__(self : Any , __a : str , __a : PreTrainedTokenizer , __a : str , __a : Optional[int] = None , __a : Dict=False , __a : bool = False , ): UpperCAmelCase_ = hans_processors[task]() UpperCAmelCase_ = os.path.join( __a , "cached_{}_{}_{}_{}".format( "dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(__a ) , __a , ) , ) UpperCAmelCase_ = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1] UpperCAmelCase_ = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. UpperCAmelCase_ = cached_features_file + ".lock" with FileLock(__a ): if os.path.exists(__a ) and not overwrite_cache: logger.info(f"""Loading features from cached file {cached_features_file}""" ) UpperCAmelCase_ = torch.load(__a ) else: logger.info(f"""Creating features from dataset file at {data_dir}""" ) UpperCAmelCase_ = ( processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a ) ) logger.info("Training examples: %s" , len(__a ) ) UpperCAmelCase_ = hans_convert_examples_to_features(__a , __a , __a , __a ) logger.info("Saving features into cached file %s" , __a ) torch.save(self.features , __a ) def __len__(self : List[Any] ): return len(self.features ) def __getitem__(self : Any , __a : Optional[Any] ): return self.features[i] def _lowercase (self : Union[str, Any] ): return self.label_list if is_tf_available(): import tensorflow as tf class __A : a__ : List[InputFeatures] def __init__(self : Union[str, Any] , __a : str , __a : PreTrainedTokenizer , __a : str , __a : Optional[int] = 128 , __a : Any=False , __a : bool = False , ): UpperCAmelCase_ = hans_processors[task]() UpperCAmelCase_ = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1] UpperCAmelCase_ = label_list UpperCAmelCase_ = processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a ) UpperCAmelCase_ = hans_convert_examples_to_features(__a , __a , __a , __a ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ): if ex_index % 10000 == 0: logger.info("Writing example %d of %d" % (ex_index, len(__a )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) UpperCAmelCase_ = tf.data.Dataset.from_generator( __a , ( { "example_id": tf.intaa, "input_ids": tf.intaa, "attention_mask": tf.intaa, "token_type_ids": tf.intaa, }, tf.intaa, ) , ( { "example_id": tf.TensorShape([] ), "input_ids": tf.TensorShape([None, None] ), "attention_mask": tf.TensorShape([None, None] ), "token_type_ids": tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def _lowercase (self : int ): return self.dataset def __len__(self : Any ): return len(self.features ) def __getitem__(self : int , __a : Union[str, Any] ): return self.features[i] def _lowercase (self : int ): return self.label_list class __A ( UpperCamelCase__ ): def _lowercase (self : List[Any] , __a : Dict ): return self._create_examples(self._read_tsv(os.path.join(__a , "heuristics_train_set.txt" ) ) , "train" ) def _lowercase (self : Any , __a : List[Any] ): return self._create_examples(self._read_tsv(os.path.join(__a , "heuristics_evaluation_set.txt" ) ) , "dev" ) def _lowercase (self : Any ): return ["contradiction", "entailment", "neutral"] def _lowercase (self : Union[str, Any] , __a : Optional[int] , __a : Union[str, Any] ): UpperCAmelCase_ = [] for i, line in enumerate(__a ): if i == 0: continue UpperCAmelCase_ = "%s-%s" % (set_type, line[0]) UpperCAmelCase_ = line[5] UpperCAmelCase_ = line[6] UpperCAmelCase_ = line[7][2:] if line[7].startswith("ex" ) else line[7] UpperCAmelCase_ = line[0] examples.append(InputExample(guid=__a , text_a=__a , text_b=__a , label=__a , pairID=__a ) ) return examples def lowerCAmelCase_ ( snake_case_ : List[InputExample] , snake_case_ : List[str] , snake_case_ : int , snake_case_ : PreTrainedTokenizer , ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ = {label: i for i, label in enumerate(snake_case_ )} UpperCAmelCase_ = [] for ex_index, example in tqdm.tqdm(enumerate(snake_case_ ) , desc="convert examples to features" ): if ex_index % 1_00_00 == 0: logger.info("Writing example %d" % (ex_index) ) UpperCAmelCase_ = tokenizer( example.text_a , example.text_b , add_special_tokens=snake_case_ , max_length=snake_case_ , padding="max_length" , truncation=snake_case_ , return_overflowing_tokens=snake_case_ , ) UpperCAmelCase_ = label_map[example.label] if example.label in label_map else 0 UpperCAmelCase_ = int(example.pairID ) features.append(InputFeatures(**snake_case_ , label=snake_case_ , pairID=snake_case_ ) ) for i, example in enumerate(examples[:5] ): logger.info("*** Example ***" ) logger.info(f"""guid: {example}""" ) logger.info(f"""features: {features[i]}""" ) return features SCREAMING_SNAKE_CASE_: int ={ 'hans': 3, } SCREAMING_SNAKE_CASE_: Any ={ 'hans': HansProcessor, }
1
1
import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() _UpperCAmelCase : Tuple = logging.get_logger(__name__) def __magic_name__( lowerCamelCase): __lowerCAmelCase = SwinConfig.from_pretrained( '''microsoft/swin-tiny-patch4-window7-224''', out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4''']) __lowerCAmelCase = MaskFormerConfig(backbone_config=lowerCamelCase) __lowerCAmelCase = '''huggingface/label-files''' if "ade20k-full" in model_name: # this should be ok __lowerCAmelCase = 8_4_7 __lowerCAmelCase = '''maskformer-ade20k-full-id2label.json''' elif "ade" in model_name: # this should be ok __lowerCAmelCase = 1_5_0 __lowerCAmelCase = '''ade20k-id2label.json''' elif "coco-stuff" in model_name: # this should be ok __lowerCAmelCase = 1_7_1 __lowerCAmelCase = '''maskformer-coco-stuff-id2label.json''' elif "coco" in model_name: # TODO __lowerCAmelCase = 1_3_3 __lowerCAmelCase = '''coco-panoptic-id2label.json''' elif "cityscapes" in model_name: # this should be ok __lowerCAmelCase = 1_9 __lowerCAmelCase = '''cityscapes-id2label.json''' elif "vistas" in model_name: # this should be ok __lowerCAmelCase = 6_5 __lowerCAmelCase = '''mapillary-vistas-id2label.json''' __lowerCAmelCase = json.load(open(hf_hub_download(lowerCamelCase, lowerCamelCase, repo_type='''dataset'''), '''r''')) __lowerCAmelCase = {int(lowerCamelCase): v for k, v in idalabel.items()} return config def __magic_name__( lowerCamelCase): __lowerCAmelCase = [] # stem # fmt: off rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''')) rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''')) rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''')) rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''')) # stages for i in range(len(config.backbone_config.depths)): for j in range(config.backbone_config.depths[i]): rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""")) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""")) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""")) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""")) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""")) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""")) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""")) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""")) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""")) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""")) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""")) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""")) if i < 3: rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""")) rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""")) rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""")) rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""")) rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""")) # FPN rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''')) rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''')) rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''')) for source_index, target_index in zip(range(3, 0, -1), range(0, 3)): rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""")) rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""")) rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""")) rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""")) rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""")) rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""")) rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''')) rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''')) # Transformer decoder for idx in range(config.decoder_config.decoder_layers): # self-attention out projection rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""")) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""")) # cross-attention out projection rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""")) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""")) # MLP 1 rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""")) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""")) # MLP 2 rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""")) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""")) # layernorm 1 (self-attention layernorm) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""")) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""")) # layernorm 2 (cross-attention layernorm) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""")) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""")) # layernorm 3 (final layernorm) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""")) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""")) rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''')) rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''')) # heads on top rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''')) rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''')) rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''')) rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''')) rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''')) for i in range(3): rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""")) rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""")) # fmt: on return rename_keys def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = dct.pop(lowerCamelCase) __lowerCAmelCase = val def __magic_name__( lowerCamelCase, lowerCamelCase): __lowerCAmelCase = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))] for i in range(len(backbone_config.depths)): __lowerCAmelCase = num_features[i] for j in range(backbone_config.depths[i]): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) __lowerCAmelCase = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""") __lowerCAmelCase = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""") # next, add query, keys and values (in that order) to the state dict __lowerCAmelCase = in_proj_weight[:dim, :] __lowerCAmelCase = in_proj_bias[: dim] __lowerCAmelCase = in_proj_weight[ dim : dim * 2, : ] __lowerCAmelCase = in_proj_bias[ dim : dim * 2 ] __lowerCAmelCase = in_proj_weight[ -dim :, : ] __lowerCAmelCase = in_proj_bias[-dim :] # fmt: on def __magic_name__( lowerCamelCase, lowerCamelCase): __lowerCAmelCase = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) __lowerCAmelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""") __lowerCAmelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""") # next, add query, keys and values (in that order) to the state dict __lowerCAmelCase = in_proj_weight[: hidden_size, :] __lowerCAmelCase = in_proj_bias[:config.hidden_size] __lowerCAmelCase = in_proj_weight[hidden_size : hidden_size * 2, :] __lowerCAmelCase = in_proj_bias[hidden_size : hidden_size * 2] __lowerCAmelCase = in_proj_weight[-hidden_size :, :] __lowerCAmelCase = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) __lowerCAmelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""") __lowerCAmelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""") # next, add query, keys and values (in that order) to the state dict __lowerCAmelCase = in_proj_weight[: hidden_size, :] __lowerCAmelCase = in_proj_bias[:config.hidden_size] __lowerCAmelCase = in_proj_weight[hidden_size : hidden_size * 2, :] __lowerCAmelCase = in_proj_bias[hidden_size : hidden_size * 2] __lowerCAmelCase = in_proj_weight[-hidden_size :, :] __lowerCAmelCase = in_proj_bias[-hidden_size :] # fmt: on def __magic_name__( ): __lowerCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __lowerCAmelCase = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase).raw) return im @torch.no_grad() def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = False): __lowerCAmelCase = get_maskformer_config(lowerCamelCase) # load original state_dict with open(lowerCamelCase, '''rb''') as f: __lowerCAmelCase = pickle.load(lowerCamelCase) __lowerCAmelCase = data['''model'''] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys __lowerCAmelCase = create_rename_keys(lowerCamelCase) for src, dest in rename_keys: rename_key(lowerCamelCase, lowerCamelCase, lowerCamelCase) read_in_swin_q_k_v(lowerCamelCase, config.backbone_config) read_in_decoder_q_k_v(lowerCamelCase, lowerCamelCase) # update to torch tensors for key, value in state_dict.items(): __lowerCAmelCase = torch.from_numpy(lowerCamelCase) # load 🤗 model __lowerCAmelCase = MaskFormerForInstanceSegmentation(lowerCamelCase) model.eval() for name, param in model.named_parameters(): print(lowerCamelCase, param.shape) __lowerCAmelCase , __lowerCAmelCase = model.load_state_dict(lowerCamelCase, strict=lowerCamelCase) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(lowerCamelCase) == 0, F"""Unexpected keys: {unexpected_keys}""" # verify results __lowerCAmelCase = prepare_img() if "vistas" in model_name: __lowerCAmelCase = 6_5 elif "cityscapes" in model_name: __lowerCAmelCase = 6_5_5_3_5 else: __lowerCAmelCase = 2_5_5 __lowerCAmelCase = True if '''ade''' in model_name else False __lowerCAmelCase = MaskFormerImageProcessor(ignore_index=lowerCamelCase, reduce_labels=lowerCamelCase) __lowerCAmelCase = image_processor(lowerCamelCase, return_tensors='''pt''') __lowerCAmelCase = model(**lowerCamelCase) print('''Logits:''', outputs.class_queries_logits[0, :3, :3]) if model_name == "maskformer-swin-tiny-ade": __lowerCAmelCase = torch.tensor( [[3.63_53, -4.47_70, -2.60_65], [0.50_81, -4.23_94, -3.53_43], [2.19_09, -5.03_53, -1.93_23]]) assert torch.allclose(outputs.class_queries_logits[0, :3, :3], lowerCamelCase, atol=1E-4) print('''Looks ok!''') if pytorch_dump_folder_path is not None: print(F"""Saving model and image processor to {pytorch_dump_folder_path}""") Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase) model.save_pretrained(lowerCamelCase) image_processor.save_pretrained(lowerCamelCase) if push_to_hub: print('''Pushing model and image processor to the hub...''') model.push_to_hub(F"""nielsr/{model_name}""") image_processor.push_to_hub(F"""nielsr/{model_name}""") if __name__ == "__main__": _UpperCAmelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""maskformer-swin-tiny-ade""", type=str, help=("""Name of the MaskFormer model you'd like to convert""",), ) parser.add_argument( """--checkpoint_path""", default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""", type=str, help="""Path to the original state dict (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) _UpperCAmelCase : List[Any] = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
352
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class a__ ( unittest.TestCase ): """simple docstring""" def _snake_case (self ): __lowerCAmelCase = tempfile.mkdtemp() # fmt: off __lowerCAmelCase = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on __lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) ) __lowerCAmelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] __lowerCAmelCase = {'''unk_token''': '''<unk>'''} __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowercase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__lowercase ) ) __lowerCAmelCase = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], '''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } __lowerCAmelCase = os.path.join(self.tmpdirname , __lowercase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__lowercase , __lowercase ) def _snake_case (self , **__lowercase ): return CLIPTokenizer.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self , **__lowercase ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self , **__lowercase ): return CLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self ): shutil.rmtree(self.tmpdirname ) def _snake_case (self ): __lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowerCAmelCase = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _snake_case (self ): __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) processor_slow.save_pretrained(self.tmpdirname ) __lowerCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowercase ) __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) processor_fast.save_pretrained(self.tmpdirname ) __lowerCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __lowercase ) self.assertIsInstance(processor_fast.tokenizer , __lowercase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowercase ) self.assertIsInstance(processor_fast.image_processor , __lowercase ) def _snake_case (self ): __lowerCAmelCase = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __lowerCAmelCase = self.get_image_processor(do_normalize=__lowercase , padding_value=1.0 ) __lowerCAmelCase = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowercase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowercase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowercase ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = image_processor(__lowercase , return_tensors='''np''' ) __lowerCAmelCase = processor(images=__lowercase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = processor(text=__lowercase ) __lowerCAmelCase = tokenizer(__lowercase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = processor(text=__lowercase , images=__lowercase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__lowercase ): processor() def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowerCAmelCase = processor.batch_decode(__lowercase ) __lowerCAmelCase = tokenizer.batch_decode(__lowercase ) self.assertListEqual(__lowercase , __lowercase ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = processor(text=__lowercase , images=__lowercase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
9
0
"""simple docstring""" def lowercase_ ( __UpperCAmelCase ) -> int: lowerCAmelCase__ : list[list[int]] = [[0 for _ in range(__UpperCAmelCase )] for _ in range(m + 1 )] for i in range(m + 1 ): lowerCAmelCase__ : Any = 1 for n in range(m + 1 ): for k in range(1 , __UpperCAmelCase ): memo[n][k] += memo[n][k - 1] if n - k > 0: memo[n][k] += memo[n - k - 1][k] return memo[m][m - 1] if __name__ == "__main__": import sys if len(sys.argv) == 1: try: _A = int(input("""Enter a number: """).strip()) print(partition(n)) except ValueError: print("""Please enter a number.""") else: try: _A = int(sys.argv[1]) print(partition(n)) except ValueError: print("""Please pass a number.""")
242
"""simple docstring""" from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def lowercase_ ( ) -> Optional[int]: lowerCAmelCase__ : Dict = { """repo_name""": ["""test_repo1""", """test_repo2""", """test_repo3"""], """path""": ["""test_1.py""", """test_2.py""", """unit_test.py"""], """content""": ["""a """ * 20, """a """ * 30, """b """ * 7], } lowerCAmelCase__ : int = Dataset.from_dict(__UpperCAmelCase ) return dataset class _lowerCamelCase ( a_ ): def _lowerCAmelCase ( self : Any ) -> int: """simple docstring""" lowerCAmelCase__ : Dict = get_dataset() lowerCAmelCase__ : Optional[int] = make_duplicate_clusters(UpperCamelCase , 0.85 ) self.assertEqual(len(duplicate_clusters[0] ) , 2 ) def _lowerCAmelCase ( self : str ) -> Dict: """simple docstring""" lowerCAmelCase__ : List[Any] = get_dataset() lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = deduplicate_dataset(UpperCamelCase ) self.assertEqual(len(UpperCamelCase ) , 2 ) print(UpperCamelCase ) self.assertEqual(duplicate_clusters[0][0]["""copies"""] , 2 ) self.assertEqual(duplicate_clusters[0][0]["""is_extreme"""] , UpperCamelCase )
242
1
'''simple docstring''' import inspect import unittest class a_ ( unittest.TestCase ): def A__ ( self ) -> Tuple: """simple docstring""" try: import diffusers # noqa: F401 except ImportError: assert False def A__ ( self ) -> int: """simple docstring""" import diffusers from diffusers.dependency_versions_table import deps UpperCamelCase = inspect.getmembers(_SCREAMING_SNAKE_CASE , inspect.isclass ) for cls_name, cls_module in all_classes: if "dummy_" in cls_module.__module__: for backend in cls_module._backends: if backend == "k_diffusion": UpperCamelCase = """k-diffusion""" elif backend == "invisible_watermark": UpperCamelCase = """invisible-watermark""" assert backend in deps, F"{backend} is not in the deps table!"
183
'''simple docstring''' import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput SCREAMING_SNAKE_CASE__ = 'scheduler_config.json' class a_ ( lowerCamelCase ): lowercase = 1 lowercase = 2 lowercase = 3 lowercase = 4 lowercase = 5 lowercase = 6 lowercase = 7 lowercase = 8 lowercase = 9 lowercase = 10 lowercase = 11 lowercase = 12 lowercase = 13 lowercase = 14 @dataclass class a_ ( lowerCamelCase ): lowercase = 42 class a_ : lowercase = SCHEDULER_CONFIG_NAME lowercase = [] lowercase = True @classmethod def A__ ( cls , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ) -> Optional[Any]: """simple docstring""" UpperCamelCase ,UpperCamelCase ,UpperCamelCase = cls.load_config( pretrained_model_name_or_path=_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE , return_unused_kwargs=_SCREAMING_SNAKE_CASE , return_commit_hash=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) return cls.from_config(_SCREAMING_SNAKE_CASE , return_unused_kwargs=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False , **_SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" self.save_config(save_directory=_SCREAMING_SNAKE_CASE , push_to_hub=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @property def A__ ( self ) -> Tuple: """simple docstring""" return self._get_compatibles() @classmethod def A__ ( cls ) -> List[Any]: """simple docstring""" UpperCamelCase = list(set([cls.__name__] + cls._compatibles ) ) UpperCamelCase = importlib.import_module(__name__.split(""".""" )[0] ) UpperCamelCase = [ getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for c in compatible_classes_str if hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ] return compatible_classes
183
1
'''simple docstring''' from itertools import product from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros def UpperCamelCase_( snake_case : Dict , snake_case : Dict ): '''simple docstring''' snake_case_ = k_size // 2 snake_case_ , snake_case_ = mgrid[0 - center : k_size - center, 0 - center : k_size - center] snake_case_ = 1 / (2 * pi * sigma) * exp(-(square(snake_case ) + square(snake_case )) / (2 * square(snake_case )) ) return g def UpperCamelCase_( snake_case : List[str] , snake_case : Any , snake_case : List[Any] ): '''simple docstring''' snake_case_ , snake_case_ = image.shape[0], image.shape[1] # dst image height and width snake_case_ = height - k_size + 1 snake_case_ = width - k_size + 1 # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows snake_case_ = zeros((dst_height * dst_width, k_size * k_size) ) snake_case_ = 0 for i, j in product(range(snake_case ) , range(snake_case ) ): snake_case_ = ravel(image[i : i + k_size, j : j + k_size] ) snake_case_ = window row += 1 # turn the kernel into shape(k*k, 1) snake_case_ = gen_gaussian_kernel(snake_case , snake_case ) snake_case_ = ravel(snake_case ) # reshape and get the dst image snake_case_ = dot(snake_case , snake_case ).reshape(snake_case , snake_case ).astype(snake_case ) return dst if __name__ == "__main__": # read original image _SCREAMING_SNAKE_CASE : Union[str, Any] = imread(r"../image_data/lena.jpg") # turn image in gray scale value _SCREAMING_SNAKE_CASE : Union[str, Any] = cvtColor(img, COLOR_BGR2GRAY) # get values with two different mask size _SCREAMING_SNAKE_CASE : Optional[Any] = gaussian_filter(gray, 3, sigma=1) _SCREAMING_SNAKE_CASE : Any = gaussian_filter(gray, 5, sigma=0.8) # show result images imshow("gaussian filter with 3x3 mask", gaussianaxa) imshow("gaussian filter with 5x5 mask", gaussianaxa) waitKey()
85
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ): if not nums: # Makes sure that the list is not empty raise ValueError('''List is empty''' ) A_ : Optional[int] = sum(SCREAMING_SNAKE_CASE ) / len(SCREAMING_SNAKE_CASE ) # Calculate the average return sum(abs(x - average ) for x in nums ) / len(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
186
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) _UpperCamelCase = { '''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''], '''tokenization_convbert''': ['''ConvBertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = ['''ConvBertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ '''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ConvBertForMaskedLM''', '''ConvBertForMultipleChoice''', '''ConvBertForQuestionAnswering''', '''ConvBertForSequenceClassification''', '''ConvBertForTokenClassification''', '''ConvBertLayer''', '''ConvBertModel''', '''ConvBertPreTrainedModel''', '''load_tf_weights_in_convbert''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ '''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFConvBertForMaskedLM''', '''TFConvBertForMultipleChoice''', '''TFConvBertForQuestionAnswering''', '''TFConvBertForSequenceClassification''', '''TFConvBertForTokenClassification''', '''TFConvBertLayer''', '''TFConvBertModel''', '''TFConvBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_convbert_fast import ConvBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) else: import sys _UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
335
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def UpperCamelCase_( snake_case__: Union[str, Any] , snake_case__: Union[str, Any] ) -> Union[str, Any]: UpperCAmelCase__ = XCLIPTextConfig() # derive patch size from model name UpperCAmelCase__ = model_name.find('patch' ) UpperCAmelCase__ = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] ) UpperCAmelCase__ = XCLIPVisionConfig(patch_size=snake_case__ , num_frames=snake_case__ ) if "large" in model_name: UpperCAmelCase__ = 7_68 UpperCAmelCase__ = 30_72 UpperCAmelCase__ = 12 UpperCAmelCase__ = 10_24 UpperCAmelCase__ = 40_96 UpperCAmelCase__ = 16 UpperCAmelCase__ = 24 UpperCAmelCase__ = 7_68 UpperCAmelCase__ = 30_72 if model_name == "xclip-large-patch14-16-frames": UpperCAmelCase__ = 3_36 UpperCAmelCase__ = XCLIPConfig.from_text_vision_configs(snake_case__ , snake_case__ ) if "large" in model_name: UpperCAmelCase__ = 7_68 return config def UpperCamelCase_( snake_case__: Any ) -> Tuple: # text encoder if name == "token_embedding.weight": UpperCAmelCase__ = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' ) if name == "positional_embedding": UpperCAmelCase__ = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' ) if "ln_1" in name: UpperCAmelCase__ = name.replace('ln_1' , 'layer_norm1' ) if "ln_2" in name: UpperCAmelCase__ = name.replace('ln_2' , 'layer_norm2' ) if "c_fc" in name: UpperCAmelCase__ = name.replace('c_fc' , 'fc1' ) if "c_proj" in name: UpperCAmelCase__ = name.replace('c_proj' , 'fc2' ) if name.startswith('transformer.resblocks' ): UpperCAmelCase__ = name.replace('transformer.resblocks' , 'text_model.encoder.layers' ) if "attn.out_proj" in name and "message" not in name: UpperCAmelCase__ = name.replace('attn.out_proj' , 'self_attn.out_proj' ) if "ln_final" in name: UpperCAmelCase__ = name.replace('ln_final' , 'text_model.final_layer_norm' ) # visual encoder if name == "visual.class_embedding": UpperCAmelCase__ = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' ) if name == "visual.positional_embedding": UpperCAmelCase__ = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' ) if name.startswith('visual.transformer.resblocks' ): UpperCAmelCase__ = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' ) if "visual.conv1" in name: UpperCAmelCase__ = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' ) if "visual.ln_pre" in name: UpperCAmelCase__ = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' ) if "visual.ln_post" in name: UpperCAmelCase__ = name.replace('visual.ln_post' , 'vision_model.post_layernorm' ) if "visual.proj" in name: UpperCAmelCase__ = name.replace('visual.proj' , 'visual_projection.weight' ) if "text_projection" in name: UpperCAmelCase__ = name.replace('text_projection' , 'text_projection.weight' ) # things on top if "prompts_visual_proj" in name: UpperCAmelCase__ = name.replace('prompts_visual_proj' , 'prompts_visual_projection' ) if "prompts_visual_ln" in name: UpperCAmelCase__ = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' ) # mit if name == "mit.positional_embedding": UpperCAmelCase__ = name.replace('positional' , 'position' ) if name.startswith('mit.resblocks' ): UpperCAmelCase__ = name.replace('mit.resblocks' , 'mit.encoder.layers' ) # prompts generator if name.startswith('prompts_generator.norm' ): UpperCAmelCase__ = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' ) return name def UpperCamelCase_( snake_case__: Union[str, Any] , snake_case__: List[Any] ) -> Optional[Any]: for key in orig_state_dict.copy().keys(): UpperCAmelCase__ = orig_state_dict.pop(snake_case__ ) if "attn.in_proj" in key: UpperCAmelCase__ = key.split('.' ) if key.startswith('visual' ): UpperCAmelCase__ = key_split[3] UpperCAmelCase__ = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: UpperCAmelCase__ = val[ :dim, : ] UpperCAmelCase__ = val[ dim : dim * 2, : ] UpperCAmelCase__ = val[ -dim:, : ] else: UpperCAmelCase__ = val[ :dim ] UpperCAmelCase__ = val[ dim : dim * 2 ] UpperCAmelCase__ = val[ -dim: ] else: if "weight" in key: UpperCAmelCase__ = val[ :dim, : ] UpperCAmelCase__ = val[ dim : dim * 2, : ] UpperCAmelCase__ = val[ -dim:, : ] else: UpperCAmelCase__ = val[:dim] UpperCAmelCase__ = val[ dim : dim * 2 ] UpperCAmelCase__ = val[-dim:] elif key.startswith('mit' ): UpperCAmelCase__ = key_split[2] UpperCAmelCase__ = config.vision_config.mit_hidden_size if "weight" in key: UpperCAmelCase__ = val[:dim, :] UpperCAmelCase__ = val[dim : dim * 2, :] UpperCAmelCase__ = val[-dim:, :] else: UpperCAmelCase__ = val[:dim] UpperCAmelCase__ = val[dim : dim * 2] UpperCAmelCase__ = val[-dim:] else: UpperCAmelCase__ = key_split[2] UpperCAmelCase__ = config.text_config.hidden_size if "weight" in key: UpperCAmelCase__ = val[:dim, :] UpperCAmelCase__ = val[ dim : dim * 2, : ] UpperCAmelCase__ = val[-dim:, :] else: UpperCAmelCase__ = val[:dim] UpperCAmelCase__ = val[ dim : dim * 2 ] UpperCAmelCase__ = val[-dim:] else: UpperCAmelCase__ = rename_key(snake_case__ ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: UpperCAmelCase__ = val.T UpperCAmelCase__ = val return orig_state_dict def UpperCamelCase_( snake_case__: Tuple ) -> Optional[Any]: if num_frames == 8: UpperCAmelCase__ = 'eating_spaghetti_8_frames.npy' elif num_frames == 16: UpperCAmelCase__ = 'eating_spaghetti.npy' elif num_frames == 32: UpperCAmelCase__ = 'eating_spaghetti_32_frames.npy' UpperCAmelCase__ = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' , filename=snake_case__ , repo_type='dataset' , ) UpperCAmelCase__ = np.load(snake_case__ ) return list(snake_case__ ) def UpperCamelCase_( snake_case__: Tuple , snake_case__: str=None , snake_case__: Union[str, Any]=False ) -> List[Any]: UpperCAmelCase__ = { # fully supervised kinetics-400 checkpoints 'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth', 'xclip-base-patch32-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth' ), 'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth', 'xclip-base-patch16-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth' ), 'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb', 'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f', # fully supervised kinetics-600 checkpoints 'xclip-base-patch16-kinetics-600': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth' ), 'xclip-base-patch16-kinetics-600-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth' ), 'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be', # few shot 'xclip-base-patch16-hmdb-2-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth' ), 'xclip-base-patch16-hmdb-4-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth' ), 'xclip-base-patch16-hmdb-8-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth' ), 'xclip-base-patch16-hmdb-16-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth' ), 'xclip-base-patch16-ucf-2-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth' ), 'xclip-base-patch16-ucf-4-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth' ), 'xclip-base-patch16-ucf-8-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth' ), 'xclip-base-patch16-ucf-16-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth' ), # zero shot 'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth', } UpperCAmelCase__ = model_to_url[model_name] UpperCAmelCase__ = 8 if "16-frames" in model_name: UpperCAmelCase__ = 16 elif "shot" in model_name: UpperCAmelCase__ = 32 UpperCAmelCase__ = get_xclip_config(snake_case__ , snake_case__ ) UpperCAmelCase__ = XCLIPModel(snake_case__ ) model.eval() if "drive" in checkpoint_url: UpperCAmelCase__ = 'pytorch_model.bin' gdown.cached_download(snake_case__ , snake_case__ , quiet=snake_case__ ) UpperCAmelCase__ = torch.load(snake_case__ , map_location='cpu' )['model'] else: UpperCAmelCase__ = torch.hub.load_state_dict_from_url(snake_case__ )['model'] UpperCAmelCase__ = convert_state_dict(snake_case__ , snake_case__ ) UpperCAmelCase__ = XCLIPModel(snake_case__ ) UpperCAmelCase__ , UpperCAmelCase__ = model.load_state_dict(snake_case__ , strict=snake_case__ ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() UpperCAmelCase__ = 3_36 if model_name == 'xclip-large-patch14-16-frames' else 2_24 UpperCAmelCase__ = VideoMAEImageProcessor(size=snake_case__ ) UpperCAmelCase__ = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' ) UpperCAmelCase__ = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' ) UpperCAmelCase__ = XCLIPProcessor(image_processor=snake_case__ , tokenizer=snake_case__ ) UpperCAmelCase__ = prepare_video(snake_case__ ) UpperCAmelCase__ = processor( text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=snake_case__ , return_tensors='pt' , padding=snake_case__ ) print('Shape of pixel values:' , inputs.pixel_values.shape ) with torch.no_grad(): UpperCAmelCase__ = model(**snake_case__ ) # Verify outputs UpperCAmelCase__ = outputs.logits_per_video UpperCAmelCase__ = logits_per_video.softmax(dim=1 ) print('Probs:' , snake_case__ ) # kinetics-400 if model_name == "xclip-base-patch32": UpperCAmelCase__ = torch.tensor([[0.0_0_1_9, 0.9_9_5_1, 0.0_0_3_0]] ) elif model_name == "xclip-base-patch32-16-frames": UpperCAmelCase__ = torch.tensor([[7.0_999e-04, 9.9_883e-01, 4.5_580e-04]] ) elif model_name == "xclip-base-patch16": UpperCAmelCase__ = torch.tensor([[0.0_0_8_3, 0.9_6_8_1, 0.0_2_3_6]] ) elif model_name == "xclip-base-patch16-16-frames": UpperCAmelCase__ = torch.tensor([[7.6_937e-04, 9.9_728e-01, 1.9_473e-03]] ) elif model_name == "xclip-large-patch14": UpperCAmelCase__ = torch.tensor([[0.0_0_6_2, 0.9_8_6_4, 0.0_0_7_5]] ) elif model_name == "xclip-large-patch14-16-frames": UpperCAmelCase__ = torch.tensor([[3.3_877e-04, 9.9_937e-01, 2.8_888e-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": UpperCAmelCase__ = torch.tensor([[0.0_5_5_5, 0.8_9_1_4, 0.0_5_3_1]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": UpperCAmelCase__ = torch.tensor([[3.8_554e-04, 9.9_929e-01, 3.2_754e-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": UpperCAmelCase__ = torch.tensor([[0.0_0_3_6, 0.9_9_2_0, 0.0_0_4_5]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": UpperCAmelCase__ = torch.tensor([[7.1_890e-06, 9.9_994e-01, 5.6_559e-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": UpperCAmelCase__ = torch.tensor([[1.0_320e-05, 9.9_993e-01, 6.2_435e-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": UpperCAmelCase__ = torch.tensor([[4.1_377e-06, 9.9_990e-01, 9.8_386e-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": UpperCAmelCase__ = torch.tensor([[4.1_347e-05, 9.9_962e-01, 3.3_411e-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": UpperCAmelCase__ = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": UpperCAmelCase__ = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": UpperCAmelCase__ = torch.tensor([[0.0_0_2_7, 0.9_9_0_4, 0.0_0_7_0]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": UpperCAmelCase__ = torch.tensor([[9.8_219e-04, 9.9_593e-01, 3.0_863e-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": UpperCAmelCase__ = torch.tensor([[3.5_082e-04, 9.9_785e-01, 1.7_966e-03]] ) else: raise ValueError(f"Model name {model_name} not supported" ) assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(f"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(snake_case__ ) if push_to_hub: print('Pushing model, processor and slow tokenizer files to the hub...' ) model.push_to_hub(snake_case__ , organization='nielsr' ) processor.push_to_hub(snake_case__ , organization='nielsr' ) slow_tokenizer.push_to_hub(snake_case__ , organization='nielsr' ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''xclip-base-patch32''', type=str, help='''Name of the model.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _UpperCamelCase = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
335
1
"""simple docstring""" import copy from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING A: List[str] = logging.get_logger(__name__) A: Dict = { "microsoft/conditional-detr-resnet-50": ( "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json" ), } class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ): __lowerCAmelCase : Any = 'conditional_detr' __lowerCAmelCase : Union[str, Any] = ['past_key_values'] __lowerCAmelCase : int = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=300 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="relu" , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE="sine" , _SCREAMING_SNAKE_CASE="resnet50" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.25 , **_SCREAMING_SNAKE_CASE , ) -> Tuple: '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) UpperCAmelCase : Tuple = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): UpperCAmelCase : str = backbone_config.get("""model_type""" ) UpperCAmelCase : int = CONFIG_MAPPING[backbone_model_type] UpperCAmelCase : Union[str, Any] = config_class.from_dict(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Union[str, Any] = use_timm_backbone UpperCAmelCase : Optional[int] = backbone_config UpperCAmelCase : List[str] = num_channels UpperCAmelCase : Any = num_queries UpperCAmelCase : Union[str, Any] = d_model UpperCAmelCase : List[str] = encoder_ffn_dim UpperCAmelCase : Optional[int] = encoder_layers UpperCAmelCase : Union[str, Any] = encoder_attention_heads UpperCAmelCase : Optional[Any] = decoder_ffn_dim UpperCAmelCase : Any = decoder_layers UpperCAmelCase : Optional[int] = decoder_attention_heads UpperCAmelCase : Optional[int] = dropout UpperCAmelCase : Dict = attention_dropout UpperCAmelCase : Dict = activation_dropout UpperCAmelCase : Any = activation_function UpperCAmelCase : Any = init_std UpperCAmelCase : Tuple = init_xavier_std UpperCAmelCase : Optional[int] = encoder_layerdrop UpperCAmelCase : Any = decoder_layerdrop UpperCAmelCase : Any = encoder_layers UpperCAmelCase : Optional[Any] = auxiliary_loss UpperCAmelCase : List[Any] = position_embedding_type UpperCAmelCase : Union[str, Any] = backbone UpperCAmelCase : List[Any] = use_pretrained_backbone UpperCAmelCase : Dict = dilation # Hungarian matcher UpperCAmelCase : Optional[int] = class_cost UpperCAmelCase : List[str] = bbox_cost UpperCAmelCase : List[str] = giou_cost # Loss coefficients UpperCAmelCase : List[Any] = mask_loss_coefficient UpperCAmelCase : List[str] = dice_loss_coefficient UpperCAmelCase : Optional[int] = cls_loss_coefficient UpperCAmelCase : Union[str, Any] = bbox_loss_coefficient UpperCAmelCase : Union[str, Any] = giou_loss_coefficient UpperCAmelCase : int = focal_alpha super().__init__(is_encoder_decoder=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @property def SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' return self.encoder_attention_heads @property def SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' return self.d_model def SCREAMING_SNAKE_CASE ( self ) -> Any: '''simple docstring''' UpperCAmelCase : List[Any] = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: UpperCAmelCase : Union[str, Any] = self.backbone_config.to_dict() UpperCAmelCase : Dict = self.__class__.model_type return output class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ): __lowerCAmelCase : Any = version.parse('1.11' ) @property def SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""pixel_mask""", {0: """batch"""}), ] ) @property def SCREAMING_SNAKE_CASE ( self ) -> float: '''simple docstring''' return 1E-5 @property def SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' return 12
109
"""simple docstring""" import os from typing import List, Optional, Union from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken from ...utils import logging A: str = logging.get_logger(__name__) A: List[Any] = {"vocab_file": "vocab.txt"} A: List[str] = { "vocab_file": { "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt", "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt", }, } A: Dict = { "facebook/esm2_t6_8M_UR50D": 1_0_2_4, "facebook/esm2_t12_35M_UR50D": 1_0_2_4, } def _snake_case ( UpperCamelCase : int ): with open(UpperCamelCase , """r""" ) as f: UpperCAmelCase : int = f.read().splitlines() return [l.strip() for l in lines] class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ): __lowerCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES __lowerCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase : str = ['input_ids', 'attention_mask'] def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<cls>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE="<eos>" , **_SCREAMING_SNAKE_CASE , ) -> Any: '''simple docstring''' super().__init__(**_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[Any] = load_vocab_file(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[Any] = dict(enumerate(self.all_tokens ) ) UpperCAmelCase : List[str] = {tok: ind for ind, tok in enumerate(self.all_tokens )} UpperCAmelCase : Any = unk_token UpperCAmelCase : str = cls_token UpperCAmelCase : int = pad_token UpperCAmelCase : Tuple = mask_token UpperCAmelCase : str = eos_token UpperCAmelCase : List[str] = self.all_tokens self._create_trie(self.unique_no_split_tokens ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> str: '''simple docstring''' return self._id_to_token.get(_SCREAMING_SNAKE_CASE , self.unk_token ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> int: '''simple docstring''' return self._token_to_id.get(_SCREAMING_SNAKE_CASE , self._token_to_id.get(self.unk_token ) ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]: '''simple docstring''' return text.split() def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]: '''simple docstring''' return len(self._id_to_token ) def SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' return {token: i for i, token in enumerate(self.all_tokens )} def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> int: '''simple docstring''' return self._token_to_id.get(_SCREAMING_SNAKE_CASE , self._token_to_id.get(self.unk_token ) ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> str: '''simple docstring''' return self._id_to_token.get(_SCREAMING_SNAKE_CASE , self.unk_token ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]: '''simple docstring''' UpperCAmelCase : Optional[Any] = [self.cls_token_id] UpperCAmelCase : Tuple = [self.eos_token_id] # No sep token in ESM vocabulary if token_ids_a is None: if self.eos_token_id is None: return cls + token_ids_a else: return cls + token_ids_a + sep elif self.eos_token_id is None: raise ValueError("""Cannot tokenize multiple sequences when EOS token is not set!""" ) return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( """You should not supply a second sequence if the provided sequence of """ """ids is already formatted with special tokens for the model.""" ) return [1 if token in self.all_special_ids else 0 for token in token_ids_a] UpperCAmelCase : str = [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] if token_ids_a is not None: mask += [0] * len(_SCREAMING_SNAKE_CASE ) + [1] return mask def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : Optional[int] = os.path.join(_SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + """vocab.txt""" ) with open(_SCREAMING_SNAKE_CASE , """w""" ) as f: f.write("""\n""".join(self.all_tokens ) ) return (vocab_file,) @property def SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' return self.get_vocab_size(with_added_tokens=_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ) -> int: '''simple docstring''' return super()._add_tokens(_SCREAMING_SNAKE_CASE , special_tokens=_SCREAMING_SNAKE_CASE )
109
1
from math import factorial def __UpperCAmelCase ( lowercase = 1_00 ): """simple docstring""" return sum(map(_A ,str(factorial(_A ) ) ) ) if __name__ == "__main__": print(solution(int(input("""Enter the Number: """).strip())))
357
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""", } class a ( lowerCAmelCase_ ): _snake_case : Any = 'layoutlmv3' def __init__( self : Optional[Any] , __lowerCAmelCase : Tuple=5_0265 , __lowerCAmelCase : Union[str, Any]=768 , __lowerCAmelCase : str=12 , __lowerCAmelCase : int=12 , __lowerCAmelCase : Any=3072 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Any=512 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Optional[int]=1e-5 , __lowerCAmelCase : int=1 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : List[str]=1024 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=128 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=64 , __lowerCAmelCase : List[str]=256 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : int=16 , __lowerCAmelCase : Optional[Any]=None , **__lowerCAmelCase : Union[str, Any] , ): super().__init__( vocab_size=__lowerCAmelCase , hidden_size=__lowerCAmelCase , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , intermediate_size=__lowerCAmelCase , hidden_act=__lowerCAmelCase , hidden_dropout_prob=__lowerCAmelCase , attention_probs_dropout_prob=__lowerCAmelCase , max_position_embeddings=__lowerCAmelCase , type_vocab_size=__lowerCAmelCase , initializer_range=__lowerCAmelCase , layer_norm_eps=__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , ) _UpperCAmelCase = max_ad_position_embeddings _UpperCAmelCase = coordinate_size _UpperCAmelCase = shape_size _UpperCAmelCase = has_relative_attention_bias _UpperCAmelCase = rel_pos_bins _UpperCAmelCase = max_rel_pos _UpperCAmelCase = has_spatial_attention_bias _UpperCAmelCase = rel_ad_pos_bins _UpperCAmelCase = max_rel_ad_pos _UpperCAmelCase = text_embed _UpperCAmelCase = visual_embed _UpperCAmelCase = input_size _UpperCAmelCase = num_channels _UpperCAmelCase = patch_size _UpperCAmelCase = classifier_dropout class a ( lowerCAmelCase_ ): _snake_case : str = version.parse('1.12' ) @property def lowerCAmelCase_ ( self : Dict ): # The order of inputs is different for question answering and sequence classification if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) else: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels"""}), ] ) @property def lowerCAmelCase_ ( self : List[Any] ): return 1e-5 @property def lowerCAmelCase_ ( self : List[str] ): return 12 def lowerCAmelCase_ ( self : str , __lowerCAmelCase : "ProcessorMixin" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 40 , __lowerCAmelCase : int = 40 , ): setattr(processor.image_processor , """apply_ocr""" , __lowerCAmelCase ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _UpperCAmelCase = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _UpperCAmelCase = processor.tokenizer.num_special_tokens_to_add(__lowerCAmelCase ) _UpperCAmelCase = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase ) # Generate dummy inputs according to compute batch and sequence _UpperCAmelCase = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes _UpperCAmelCase = [[[48, 84, 73, 128]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) _UpperCAmelCase = self._generate_dummy_images(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = dict( processor( __lowerCAmelCase , text=__lowerCAmelCase , boxes=__lowerCAmelCase , return_tensors=__lowerCAmelCase , ) ) return inputs
30
0
import argparse import os import torch from transformers.utils import WEIGHTS_NAME __lowerCamelCase = ["""small""", """medium""", """large"""] __lowerCamelCase = """lm_head.decoder.weight""" __lowerCamelCase = """lm_head.weight""" def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : str ): snake_case : Optional[int] = torch.load(__lowerCamelCase ) snake_case : Any = d.pop(__lowerCamelCase ) os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) torch.save(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) ) if __name__ == "__main__": __lowerCamelCase = argparse.ArgumentParser() parser.add_argument("""--dialogpt_path""", default=""".""", type=str) __lowerCamelCase = parser.parse_args() for MODEL in DIALOGPT_MODELS: __lowerCamelCase = os.path.join(args.dialogpt_path, F'{MODEL}_ft.pkl') __lowerCamelCase = F'./DialoGPT-{MODEL}' convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
59
"""simple docstring""" import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class UpperCamelCase__: def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=13 ,__UpperCAmelCase=7 ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=99 ,__UpperCAmelCase=64 ,__UpperCAmelCase=5 ,__UpperCAmelCase=4 ,__UpperCAmelCase=37 ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=5_12 ,__UpperCAmelCase=16 ,__UpperCAmelCase=2 ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=3 ,__UpperCAmelCase=4 ,__UpperCAmelCase=None ,) -> List[Any]: A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_input_mask A__ = use_token_type_ids A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = type_sequence_label_size A__ = initializer_range A__ = num_labels A__ = num_choices A__ = scope A__ = vocab_size - 1 def snake_case__ ( self ) -> str: A__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A__ = None if self.use_input_mask: A__ = random_attention_mask([self.batch_size, self.seq_length] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A__ = self.get_config() return config, input_ids, input_mask, token_labels def snake_case__ ( self ) -> List[str]: return GPTNeoXConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__UpperCAmelCase ,initializer_range=self.initializer_range ,pad_token_id=self.pad_token_id ,) def snake_case__ ( self ) -> List[str]: A__ , A__ , A__ , A__ = self.prepare_config_and_inputs() A__ = True return config, input_ids, input_mask, token_labels def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Union[str, Any]: A__ = GPTNeoXModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ) A__ = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[Any]: A__ = True A__ = GPTNeoXModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Dict: A__ = GPTNeoXForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Dict: A__ = self.num_labels A__ = GPTNeoXForQuestionAnswering(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> str: A__ = self.num_labels A__ = GPTNeoXForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() A__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Dict: A__ = self.num_labels A__ = GPTNeoXForTokenClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Dict: A__ = True A__ = GPTNeoXForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() # first forward pass A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,use_cache=__UpperCAmelCase ) A__ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A__ = ids_tensor((self.batch_size, 3) ,config.vocab_size ) A__ = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and A__ = torch.cat([input_ids, next_tokens] ,dim=-1 ) A__ = torch.cat([input_mask, next_mask] ,dim=-1 ) A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ) A__ = output_from_no_past['hidden_states'][0] A__ = model( __UpperCAmelCase ,attention_mask=__UpperCAmelCase ,past_key_values=__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ,)['hidden_states'][0] # select random slice A__ = ids_tensor((1,) ,output_from_past.shape[-1] ).item() A__ = output_from_no_past[:, -3:, random_slice_idx].detach() A__ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCAmelCase ,__UpperCAmelCase ,atol=1e-3 ) ) def snake_case__ ( self ) -> Dict: A__ = self.prepare_config_and_inputs() A__ , A__ , A__ , A__ = config_and_inputs A__ = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCamelCase__( __A , __A , __A , unittest.TestCase ): lowerCAmelCase__ : Optional[int] = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) lowerCAmelCase__ : List[Any] = (GPTNeoXForCausalLM,) if is_torch_available() else () lowerCAmelCase__ : List[str] = ( { 'feature-extraction': GPTNeoXModel, 'question-answering': GPTNeoXForQuestionAnswering, 'text-classification': GPTNeoXForSequenceClassification, 'text-generation': GPTNeoXForCausalLM, 'token-classification': GPTNeoXForTokenClassification, 'zero-shot': GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase__ : Union[str, Any] = False lowerCAmelCase__ : str = False lowerCAmelCase__ : Any = False lowerCAmelCase__ : str = False def snake_case__ ( self ) -> Tuple: A__ = GPTNeoXModelTester(self ) A__ = ConfigTester(self ,config_class=__UpperCAmelCase ,hidden_size=64 ,num_attention_heads=8 ) def snake_case__ ( self ) -> str: self.config_tester.run_common_tests() def snake_case__ ( self ) -> List[str]: A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) def snake_case__ ( self ) -> Dict: A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) def snake_case__ ( self ) -> Optional[int]: # This regression test was failing with PyTorch < 1.3 A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs_for_decoder() A__ = None self.model_tester.create_and_check_model_as_decoder(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) def snake_case__ ( self ) -> str: A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) def snake_case__ ( self ) -> Optional[int]: A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase ) def snake_case__ ( self ) -> List[str]: A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) def snake_case__ ( self ) -> Any: A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def snake_case__ ( self ) -> List[Any]: A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @unittest.skip(reason='Feed forward chunking is not implemented' ) def snake_case__ ( self ) -> str: pass @parameterized.expand([('linear',), ('dynamic',)] ) def snake_case__ ( self ,__UpperCAmelCase ) -> Tuple: A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = ids_tensor([1, 10] ,config.vocab_size ) A__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A__ = GPTNeoXModel(__UpperCAmelCase ) original_model.to(__UpperCAmelCase ) original_model.eval() A__ = original_model(__UpperCAmelCase ).last_hidden_state A__ = original_model(__UpperCAmelCase ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A__ = {'type': scaling_type, 'factor': 1_0.0} A__ = GPTNeoXModel(__UpperCAmelCase ) scaled_model.to(__UpperCAmelCase ) scaled_model.eval() A__ = scaled_model(__UpperCAmelCase ).last_hidden_state A__ = scaled_model(__UpperCAmelCase ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(__UpperCAmelCase ,__UpperCAmelCase ,atol=1e-5 ) ) else: self.assertFalse(torch.allclose(__UpperCAmelCase ,__UpperCAmelCase ,atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(__UpperCAmelCase ,__UpperCAmelCase ,atol=1e-5 ) ) @require_torch class UpperCamelCase__( unittest.TestCase ): @slow def snake_case__ ( self ) -> int: A__ = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' ) for checkpointing in [True, False]: A__ = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' ) if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(__UpperCAmelCase ) A__ = tokenizer('My favorite food is' ,return_tensors='pt' ).to(__UpperCAmelCase ) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. # See: https://github.com/huggingface/transformers/pull/24193 A__ = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure' A__ = model.generate(**__UpperCAmelCase ,do_sample=__UpperCAmelCase ,max_new_tokens=20 ) A__ = tokenizer.batch_decode(__UpperCAmelCase )[0] self.assertEqual(__UpperCAmelCase ,__UpperCAmelCase )
221
0
"""simple docstring""" from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCAmelCase ( lowerCamelCase_ ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = ["""image_processor""", """tokenizer"""] SCREAMING_SNAKE_CASE_ : Optional[int] = """BlipImageProcessor""" SCREAMING_SNAKE_CASE_ : str = ("""BertTokenizer""", """BertTokenizerFast""") def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any: SCREAMING_SNAKE_CASE = False super().__init__(lowerCAmelCase__ , lowerCAmelCase__ ) SCREAMING_SNAKE_CASE = self.image_processor def __call__( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = True , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = 0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = True , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> BatchEncoding: if images is None and text is None: raise ValueError('You have to specify either images or text.' ) # Get only text if images is None: SCREAMING_SNAKE_CASE = self.tokenizer SCREAMING_SNAKE_CASE = self.tokenizer( text=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , stride=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_overflowing_tokens=lowerCAmelCase__ , return_special_tokens_mask=lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_length=lowerCAmelCase__ , verbose=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ , ) return text_encoding # add pixel_values SCREAMING_SNAKE_CASE = self.image_processor(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ ) if text is not None: SCREAMING_SNAKE_CASE = self.tokenizer( text=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , stride=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_overflowing_tokens=lowerCAmelCase__ , return_special_tokens_mask=lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_length=lowerCAmelCase__ , verbose=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ , ) else: SCREAMING_SNAKE_CASE = None if text_encoding is not None: encoding_image_processor.update(lowerCAmelCase__ ) return encoding_image_processor def __A ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any: return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ ) def __A ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int: return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ ) @property def __A ( self ) -> Union[str, Any]: SCREAMING_SNAKE_CASE = self.tokenizer.model_input_names SCREAMING_SNAKE_CASE = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
38
"""simple docstring""" import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def lowercase (SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> Union[str, Any]: return params[F'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :] def lowercase (SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int]="attention" ) -> List[Any]: SCREAMING_SNAKE_CASE = SCREAMING_SNAKE_CASE = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] ) SCREAMING_SNAKE_CASE = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] ) SCREAMING_SNAKE_CASE = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] ) SCREAMING_SNAKE_CASE = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] ) SCREAMING_SNAKE_CASE = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] ) SCREAMING_SNAKE_CASE = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] ) SCREAMING_SNAKE_CASE = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] ) SCREAMING_SNAKE_CASE = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] ) return k, o, q, v def lowercase (SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any=False ) -> List[Any]: if split_mlp_wi: SCREAMING_SNAKE_CASE = params[F'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :] SCREAMING_SNAKE_CASE = params[F'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :] SCREAMING_SNAKE_CASE = (wi_a, wi_a) else: SCREAMING_SNAKE_CASE = params[F'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :] SCREAMING_SNAKE_CASE = params[F'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :] return wi, wo def lowercase (SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict ) -> Union[str, Any]: return params[F'{prefix}/{prefix}/{layer_name}/scale'][:, i] def lowercase (SCREAMING_SNAKE_CASE_ : dict , *, SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : bool , SCREAMING_SNAKE_CASE_ : bool = False ) -> Tuple: SCREAMING_SNAKE_CASE = traverse_util.flatten_dict(variables['target'] ) SCREAMING_SNAKE_CASE = {'/'.join(SCREAMING_SNAKE_CASE_ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi SCREAMING_SNAKE_CASE = 'encoder/encoder/mlp/wi_0/kernel' in old print('Split MLP:' , SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = collections.OrderedDict() # Shared embeddings. SCREAMING_SNAKE_CASE = old['token_embedder/embedding'] # Encoder. for i in range(SCREAMING_SNAKE_CASE_ ): # Block i, layer 0 (Self Attention). SCREAMING_SNAKE_CASE = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 'encoder' , 'pre_attention_layer_norm' ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = tax_attention_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 'encoder' , 'attention' ) SCREAMING_SNAKE_CASE = layer_norm SCREAMING_SNAKE_CASE = k.T SCREAMING_SNAKE_CASE = o.T SCREAMING_SNAKE_CASE = q.T SCREAMING_SNAKE_CASE = v.T # Block i, layer 1 (MLP). SCREAMING_SNAKE_CASE = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 'encoder' , 'pre_mlp_layer_norm' ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = tax_mlp_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 'encoder' , SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = layer_norm if split_mlp_wi: SCREAMING_SNAKE_CASE = wi[0].T SCREAMING_SNAKE_CASE = wi[1].T else: SCREAMING_SNAKE_CASE = wi.T SCREAMING_SNAKE_CASE = wo.T if scalable_attention: # convert the rel_embedding of each layer SCREAMING_SNAKE_CASE = tax_relpos_bias_lookup( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 'encoder' ).T SCREAMING_SNAKE_CASE = old['encoder/encoder_norm/scale'] if not scalable_attention: SCREAMING_SNAKE_CASE = tax_relpos_bias_lookup( SCREAMING_SNAKE_CASE_ , 0 , 'encoder' ).T SCREAMING_SNAKE_CASE = tax_relpos_bias_lookup( SCREAMING_SNAKE_CASE_ , 0 , 'decoder' ).T if not is_encoder_only: # Decoder. for i in range(SCREAMING_SNAKE_CASE_ ): # Block i, layer 0 (Self Attention). SCREAMING_SNAKE_CASE = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 'decoder' , 'pre_self_attention_layer_norm' ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = tax_attention_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 'decoder' , 'self_attention' ) SCREAMING_SNAKE_CASE = layer_norm SCREAMING_SNAKE_CASE = k.T SCREAMING_SNAKE_CASE = o.T SCREAMING_SNAKE_CASE = q.T SCREAMING_SNAKE_CASE = v.T # Block i, layer 1 (Cross Attention). SCREAMING_SNAKE_CASE = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 'decoder' , 'pre_cross_attention_layer_norm' ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = tax_attention_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 'decoder' , 'encoder_decoder_attention' ) SCREAMING_SNAKE_CASE = layer_norm SCREAMING_SNAKE_CASE = k.T SCREAMING_SNAKE_CASE = o.T SCREAMING_SNAKE_CASE = q.T SCREAMING_SNAKE_CASE = v.T # Block i, layer 2 (MLP). SCREAMING_SNAKE_CASE = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 'decoder' , 'pre_mlp_layer_norm' ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = tax_mlp_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 'decoder' , SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = layer_norm if split_mlp_wi: SCREAMING_SNAKE_CASE = wi[0].T SCREAMING_SNAKE_CASE = wi[1].T else: SCREAMING_SNAKE_CASE = wi.T SCREAMING_SNAKE_CASE = wo.T if scalable_attention: # convert the rel_embedding of each layer SCREAMING_SNAKE_CASE = tax_relpos_bias_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 'decoder' ).T SCREAMING_SNAKE_CASE = old['decoder/decoder_norm/scale'] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: SCREAMING_SNAKE_CASE = old['decoder/logits_dense/kernel'].T return new def lowercase (SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : bool ) -> int: SCREAMING_SNAKE_CASE = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: SCREAMING_SNAKE_CASE = state_dict['shared.weight'] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: SCREAMING_SNAKE_CASE = state_dict['shared.weight'] if "lm_head.weight" not in state_dict: # For old 1.0 models. print('Using shared word embeddings as lm_head.' ) SCREAMING_SNAKE_CASE = state_dict['shared.weight'] return state_dict def lowercase (SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> int: SCREAMING_SNAKE_CASE = checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = convert_tax_to_pytorch( SCREAMING_SNAKE_CASE_ , num_layers=config.num_layers , is_encoder_only=SCREAMING_SNAKE_CASE_ , scalable_attention=SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = make_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ ) def lowercase (SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = False , ) -> Any: SCREAMING_SNAKE_CASE = MTaConfig.from_json_file(SCREAMING_SNAKE_CASE_ ) print(F'Building PyTorch model from configuration: {config}' ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: SCREAMING_SNAKE_CASE = UMTaEncoderModel(SCREAMING_SNAKE_CASE_ ) else: SCREAMING_SNAKE_CASE = UMTaForConditionalGeneration(SCREAMING_SNAKE_CASE_ ) # Load weights from tf checkpoint load_tax_weights_in_ta(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Save pytorch-model print(F'Save PyTorch model to {pytorch_dump_path}' ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) # Verify that we can load the checkpoint. model.from_pretrained(SCREAMING_SNAKE_CASE_ ) print('Done' ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''') # Required parameters parser.add_argument( '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False ) parser.add_argument( '''--scalable_attention''', action='''store_true''', help='''Whether the model uses scaled attention (umt5 model)''', default=False, ) __UpperCamelCase = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
38
1
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''', # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = "vit_msn" def __init__( self : Dict , _UpperCamelCase : Optional[int]=7_6_8 , _UpperCamelCase : Optional[Any]=1_2 , _UpperCamelCase : Union[str, Any]=1_2 , _UpperCamelCase : str=3_0_7_2 , _UpperCamelCase : Tuple="gelu" , _UpperCamelCase : Dict=0.0 , _UpperCamelCase : Dict=0.0 , _UpperCamelCase : List[str]=0.02 , _UpperCamelCase : List[Any]=1e-06 , _UpperCamelCase : Any=2_2_4 , _UpperCamelCase : Optional[Any]=1_6 , _UpperCamelCase : Any=3 , _UpperCamelCase : str=True , **_UpperCamelCase : Any , ) ->int: super().__init__(**_UpperCamelCase ) snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = image_size snake_case_ = patch_size snake_case_ = num_channels snake_case_ = qkv_bias
8
'''simple docstring''' import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging lowerCAmelCase : int =logging.get_logger(__name__) lowerCAmelCase : List[str] ='''▁''' lowerCAmelCase : List[str] ={ '''vocab_file''': '''vocab.json''', '''spm_file''': '''sentencepiece.bpe.model''', '''tokenizer_config_file''': '''tokenizer_config.json''', } lowerCAmelCase : Optional[Any] ={ '''vocab_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''', }, '''spm_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''', }, '''tokenizer_config_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''', }, } lowerCAmelCase : int ={ '''facebook/m2m100_418M''': 1_024, } # fmt: off lowerCAmelCase : str ={ '''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''], '''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de'''] } class a_ ( _lowerCAmelCase ): __A = VOCAB_FILES_NAMES __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __A = PRETRAINED_VOCAB_FILES_MAP __A = ["input_ids", "attention_mask"] __A = [] __A = [] def __init__( self : Any , lowercase : Any , lowercase : List[Any] , lowercase : int=None , lowercase : Optional[Any]=None , lowercase : Union[str, Any]="<s>" , lowercase : Any="</s>" , lowercase : Optional[int]="</s>" , lowercase : List[Any]="<pad>" , lowercase : Optional[int]="<unk>" , lowercase : Optional[int]="m2m100" , lowercase : Optional[Dict[str, Any]] = None , lowercase : Any=8 , **lowercase : int , ): """simple docstring""" lowercase_ :Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs lowercase_ :Optional[Any] = language_codes lowercase_ :Tuple = FAIRSEQ_LANGUAGE_CODES[language_codes] lowercase_ :List[Any] = {lang_code: F'__{lang_code}__' for lang_code in fairseq_language_code} lowercase_ :Union[str, Any] = kwargs.get("additional_special_tokens" , [] ) kwargs["additional_special_tokens"] += [ self.get_lang_token(lowercase ) for lang_code in fairseq_language_code if self.get_lang_token(lowercase ) not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=lowercase , tgt_lang=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , unk_token=lowercase , pad_token=lowercase , language_codes=lowercase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=lowercase , **lowercase , ) lowercase_ :Optional[int] = vocab_file lowercase_ :Any = load_json(lowercase ) lowercase_ :Optional[Any] = {v: k for k, v in self.encoder.items()} lowercase_ :List[str] = spm_file lowercase_ :List[str] = load_spm(lowercase , self.sp_model_kwargs ) lowercase_ :Optional[int] = len(self.encoder ) lowercase_ :int = { self.get_lang_token(lowercase ): self.encoder_size + i for i, lang_code in enumerate(lowercase ) } lowercase_ :List[Any] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(lowercase )} lowercase_ :List[Any] = {v: k for k, v in self.lang_token_to_id.items()} lowercase_ :int = src_lang if src_lang is not None else "en" lowercase_ :Union[str, Any] = tgt_lang lowercase_ :List[Any] = self.get_lang_id(self._src_lang ) self.set_src_lang_special_tokens(self._src_lang ) lowercase_ :int = num_madeup_words @property def lowercase__ ( self : List[str] ): """simple docstring""" return len(self.encoder ) + len(self.lang_token_to_id ) @property def lowercase__ ( self : Any ): """simple docstring""" return self._src_lang @src_lang.setter def lowercase__ ( self : Optional[int] , lowercase : str ): """simple docstring""" lowercase_ :str = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def lowercase__ ( self : Dict , lowercase : str ): """simple docstring""" return self.sp_model.encode(lowercase , out_type=lowercase ) def lowercase__ ( self : Tuple , lowercase : Dict ): """simple docstring""" if token in self.lang_token_to_id: return self.lang_token_to_id[token] return self.encoder.get(lowercase , self.encoder[self.unk_token] ) def lowercase__ ( self : Any , lowercase : int ): """simple docstring""" if index in self.id_to_lang_token: return self.id_to_lang_token[index] return self.decoder.get(lowercase , self.unk_token ) def lowercase__ ( self : int , lowercase : int ): """simple docstring""" lowercase_ :Optional[Any] = [] lowercase_ :Any = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(lowercase ) + token lowercase_ :str = [] else: current_sub_tokens.append(lowercase ) out_string += self.sp_model.decode(lowercase ) return out_string.strip() def lowercase__ ( self : Any , lowercase : List[int] , lowercase : Optional[List[int]] = None , lowercase : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase ) lowercase_ :List[Any] = [1] * len(self.prefix_tokens ) lowercase_ :List[Any] = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(lowercase )) + suffix_ones return prefix_ones + ([0] * len(lowercase )) + ([0] * len(lowercase )) + suffix_ones def lowercase__ ( self : Union[str, Any] , lowercase : List[int] , lowercase : Optional[List[int]] = None ): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def lowercase__ ( self : Union[str, Any] ): """simple docstring""" lowercase_ :str = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : str ): """simple docstring""" lowercase_ :Any = self.__dict__.copy() lowercase_ :str = None return state def __setstate__( self : Tuple , lowercase : Dict ): """simple docstring""" lowercase_ :int = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): lowercase_ :List[str] = {} lowercase_ :List[Any] = load_spm(self.spm_file , self.sp_model_kwargs ) def lowercase__ ( self : str , lowercase : str , lowercase : Optional[str] = None ): """simple docstring""" lowercase_ :Dict = Path(lowercase ) if not save_dir.is_dir(): raise OSError(F'{save_directory} should be a directory' ) lowercase_ :Dict = save_dir / ( (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"] ) lowercase_ :Dict = save_dir / ( (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"] ) save_json(self.encoder , lowercase ) if os.path.abspath(self.spm_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , lowercase ) elif not os.path.isfile(self.spm_file ): with open(lowercase , "wb" ) as fi: lowercase_ :List[str] = self.sp_model.serialized_model_proto() fi.write(lowercase ) return (str(lowercase ), str(lowercase )) def lowercase__ ( self : List[str] , lowercase : List[str] , lowercase : str = "en" , lowercase : Optional[List[str]] = None , lowercase : str = "ro" , **lowercase : Optional[int] , ): """simple docstring""" lowercase_ :int = src_lang lowercase_ :Optional[int] = tgt_lang self.set_src_lang_special_tokens(self.src_lang ) return super().prepare_seqaseq_batch(lowercase , lowercase , **lowercase ) def lowercase__ ( self : List[Any] , lowercase : Any , lowercase : Optional[str] , lowercase : Optional[str] , **lowercase : Union[str, Any] ): """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) lowercase_ :List[str] = src_lang lowercase_ :Union[str, Any] = self(lowercase , add_special_tokens=lowercase , **lowercase ) lowercase_ :str = self.get_lang_id(lowercase ) lowercase_ :Union[str, Any] = tgt_lang_id return inputs def lowercase__ ( self : str ): """simple docstring""" self.set_src_lang_special_tokens(self.src_lang ) def lowercase__ ( self : Tuple ): """simple docstring""" self.set_tgt_lang_special_tokens(self.tgt_lang ) def lowercase__ ( self : str , lowercase : str ): """simple docstring""" lowercase_ :List[str] = self.get_lang_token(lowercase ) lowercase_ :List[str] = self.lang_token_to_id[lang_token] lowercase_ :List[Any] = [self.cur_lang_id] lowercase_ :str = [self.eos_token_id] def lowercase__ ( self : str , lowercase : str ): """simple docstring""" lowercase_ :Optional[int] = self.get_lang_token(lowercase ) lowercase_ :Tuple = self.lang_token_to_id[lang_token] lowercase_ :Dict = [self.cur_lang_id] lowercase_ :List[Any] = [self.eos_token_id] def lowercase__ ( self : Union[str, Any] , lowercase : str ): """simple docstring""" return self.lang_code_to_token[lang] def lowercase__ ( self : Dict , lowercase : str ): """simple docstring""" lowercase_ :Union[str, Any] = self.get_lang_token(lowercase ) return self.lang_token_to_id[lang_token] def UpperCAmelCase_ ( __lowerCamelCase : str ,__lowerCamelCase : Dict[str, Any] ): lowercase_ :List[str] = sentencepiece.SentencePieceProcessor(**__lowerCamelCase ) spm.Load(str(__lowerCamelCase ) ) return spm def UpperCAmelCase_ ( __lowerCamelCase : str ): with open(__lowerCamelCase ,"r" ) as f: return json.load(__lowerCamelCase ) def UpperCAmelCase_ ( __lowerCamelCase : int ,__lowerCamelCase : str ): with open(__lowerCamelCase ,"w" ) as f: json.dump(__lowerCamelCase ,__lowerCamelCase ,indent=2 )
223
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Optional[Any] = { 'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json', 'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json', 'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json', 'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json', 'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json', 'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json', } class __A (snake_case__): '''simple docstring''' __lowercase: str = """roberta""" def __init__( self : List[Any] , UpperCAmelCase_ : str=50_265 , UpperCAmelCase_ : List[str]=768 , UpperCAmelCase_ : int=12 , UpperCAmelCase_ : List[Any]=12 , UpperCAmelCase_ : str=3_072 , UpperCAmelCase_ : Union[str, Any]="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Union[str, Any]=512 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Optional[Any]=1E-12 , UpperCAmelCase_ : Optional[int]=1 , UpperCAmelCase_ : Union[str, Any]=0 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Dict="absolute" , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : str=None , **UpperCAmelCase_ : Union[str, Any] , ) ->Tuple: """simple docstring""" super().__init__(pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ ) snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = hidden_act snake_case_ = intermediate_size snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = position_embedding_type snake_case_ = use_cache snake_case_ = classifier_dropout class __A (snake_case__): '''simple docstring''' @property def lowerCAmelCase ( self : int ) ->Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": snake_case_ = {0: """batch""", 1: """choice""", 2: """sequence"""} else: snake_case_ = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
233
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator class __A : '''simple docstring''' def __init__( self : Any , UpperCAmelCase_ : int ) ->None: """simple docstring""" snake_case_ = value snake_case_ = None snake_case_ = None class __A : '''simple docstring''' def __init__( self : List[Any] , UpperCAmelCase_ : Node ) ->None: """simple docstring""" snake_case_ = tree def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Node | None ) ->int: """simple docstring""" if node is None: return 0 return node.value + ( self.depth_first_search(node.left ) + self.depth_first_search(node.right ) ) def __iter__( self : str ) ->Iterator[int]: """simple docstring""" yield self.depth_first_search(self.tree ) if __name__ == "__main__": import doctest doctest.testmod()
233
1
import numpy # List of input, output pairs _UpperCAmelCase : List[str] =( ((5, 2, 3), 15), ((6, 5, 9), 25), ((11, 12, 13), 41), ((1, 1, 1), 8), ((11, 12, 13), 41), ) _UpperCAmelCase : Any =(((515, 22, 13), 555), ((61, 35, 49), 150)) _UpperCAmelCase : Tuple =[2, 4, 1, 5] _UpperCAmelCase : List[Any] =len(train_data) _UpperCAmelCase : Union[str, Any] =0.009 def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_="train" )-> Dict: return calculate_hypothesis_value(lowerCAmelCase_ , lowerCAmelCase_ ) - output( lowerCAmelCase_ , lowerCAmelCase_ ) def lowerCAmelCase ( lowerCAmelCase_ )-> int: lowerCAmelCase_ : Tuple = 0 for i in range(len(lowerCAmelCase_ ) - 1 ): hyp_val += data_input_tuple[i] * parameter_vector[i + 1] hyp_val += parameter_vector[0] return hyp_val def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> int: if data_set == "train": return train_data[example_no][1] elif data_set == "test": return test_data[example_no][1] return None def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[Any]: if data_set == "train": return _hypothesis_value(train_data[example_no][0] ) elif data_set == "test": return _hypothesis_value(test_data[example_no][0] ) return None def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_=m )-> str: lowerCAmelCase_ : List[str] = 0 for i in range(lowerCAmelCase_ ): if index == -1: summation_value += _error(lowerCAmelCase_ ) else: summation_value += _error(lowerCAmelCase_ ) * train_data[i][0][index] return summation_value def lowerCAmelCase ( lowerCAmelCase_ )-> Optional[int]: lowerCAmelCase_ : int = summation_of_cost_derivative(lowerCAmelCase_ , lowerCAmelCase_ ) / m return cost_derivative_value def lowerCAmelCase ( )-> Optional[Any]: global parameter_vector # Tune these values to set a tolerance value for predicted output lowerCAmelCase_ : str = 0.000002 lowerCAmelCase_ : List[str] = 0 lowerCAmelCase_ : Union[str, Any] = 0 while True: j += 1 lowerCAmelCase_ : Dict = [0, 0, 0, 0] for i in range(0 , len(lowerCAmelCase_ ) ): lowerCAmelCase_ : Tuple = get_cost_derivative(i - 1 ) lowerCAmelCase_ : Any = ( parameter_vector[i] - LEARNING_RATE * cost_derivative ) if numpy.allclose( lowerCAmelCase_ , lowerCAmelCase_ , atol=lowerCAmelCase_ , rtol=lowerCAmelCase_ , ): break lowerCAmelCase_ : Any = temp_parameter_vector print(('''Number of iterations:''', j) ) def lowerCAmelCase ( )-> Union[str, Any]: for i in range(len(lowerCAmelCase_ ) ): print(('''Actual output value:''', output(lowerCAmelCase_ , '''test''' )) ) print(('''Hypothesis output:''', calculate_hypothesis_value(lowerCAmelCase_ , '''test''' )) ) if __name__ == "__main__": run_gradient_descent() print("""\nTesting gradient descent for a linear hypothesis function.\n""") test_gradient_descent()
262
import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin _UpperCAmelCase : List[str] =get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right _UpperCAmelCase : Optional[int] =25_0004 _UpperCAmelCase : Tuple =25_0020 @require_sentencepiece @require_tokenizers class snake_case__( UpperCAmelCase__, unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Tuple = MBartTokenizer SCREAMING_SNAKE_CASE__ : Dict = MBartTokenizerFast SCREAMING_SNAKE_CASE__ : Tuple = True SCREAMING_SNAKE_CASE__ : List[str] = True def lowercase_ ( self ) -> Dict: super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase_ : str = MBartTokenizer(__lowercase , keep_accents=__lowercase ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase_ ( self ) -> List[Any]: lowerCAmelCase_ : Optional[int] = MBartTokenizer(__lowercase , keep_accents=__lowercase ) lowerCAmelCase_ : Dict = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(__lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowercase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , ) lowerCAmelCase_ : Optional[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( __lowercase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) lowerCAmelCase_ : Dict = tokenizer.convert_tokens_to_ids(__lowercase ) self.assertListEqual( __lowercase , [ value + tokenizer.fairseq_offset for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) lowerCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(__lowercase ) self.assertListEqual( __lowercase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def lowercase_ ( self ) -> Dict: if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return lowerCAmelCase_ : int = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowerCAmelCase_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(__lowercase , **__lowercase ) lowerCAmelCase_ : int = self.tokenizer_class.from_pretrained(__lowercase , **__lowercase ) lowerCAmelCase_ : Tuple = tempfile.mkdtemp() lowerCAmelCase_ : Union[str, Any] = tokenizer_r.save_pretrained(__lowercase ) lowerCAmelCase_ : Dict = tokenizer_p.save_pretrained(__lowercase ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) lowerCAmelCase_ : str = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(__lowercase , __lowercase ) # Checks everything loads correctly in the same way lowerCAmelCase_ : Tuple = tokenizer_r.from_pretrained(__lowercase ) lowerCAmelCase_ : Dict = tokenizer_p.from_pretrained(__lowercase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowercase , __lowercase ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(__lowercase ) # Save tokenizer rust, legacy_format=True lowerCAmelCase_ : Optional[Any] = tempfile.mkdtemp() lowerCAmelCase_ : int = tokenizer_r.save_pretrained(__lowercase , legacy_format=__lowercase ) lowerCAmelCase_ : Tuple = tokenizer_p.save_pretrained(__lowercase ) # Checks it save with the same files self.assertSequenceEqual(__lowercase , __lowercase ) # Checks everything loads correctly in the same way lowerCAmelCase_ : Optional[int] = tokenizer_r.from_pretrained(__lowercase ) lowerCAmelCase_ : List[Any] = tokenizer_p.from_pretrained(__lowercase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowercase , __lowercase ) ) shutil.rmtree(__lowercase ) # Save tokenizer rust, legacy_format=False lowerCAmelCase_ : Optional[Any] = tempfile.mkdtemp() lowerCAmelCase_ : List[str] = tokenizer_r.save_pretrained(__lowercase , legacy_format=__lowercase ) lowerCAmelCase_ : Optional[int] = tokenizer_p.save_pretrained(__lowercase ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way lowerCAmelCase_ : Dict = tokenizer_r.from_pretrained(__lowercase ) lowerCAmelCase_ : List[Any] = tokenizer_p.from_pretrained(__lowercase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowercase , __lowercase ) ) shutil.rmtree(__lowercase ) @require_torch @require_sentencepiece @require_tokenizers class snake_case__( unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = """facebook/mbart-large-en-ro""" SCREAMING_SNAKE_CASE__ : int = [ """ UN Chief Says There Is No Military Solution in Syria""", """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""", ] SCREAMING_SNAKE_CASE__ : Optional[int] = [ """Şeful ONU declară că nu există o soluţie militară în Siria""", """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei""" """ pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor""" """ face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""", ] SCREAMING_SNAKE_CASE__ : str = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE] @classmethod def lowercase_ ( cls ) -> Optional[int]: lowerCAmelCase_ : MBartTokenizer = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' ) lowerCAmelCase_ : Optional[Any] = 1 return cls def lowercase_ ( self ) -> Optional[Any]: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 2_5_0_0_0_1 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 2_5_0_0_0_4 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 2_5_0_0_2_0 ) def lowercase_ ( self ) -> Tuple: lowerCAmelCase_ : Optional[int] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , __lowercase ) def lowercase_ ( self ) -> Any: self.assertIn(__lowercase , self.tokenizer.all_special_ids ) lowerCAmelCase_ : Union[str, Any] = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2] lowerCAmelCase_ : Tuple = self.tokenizer.decode(__lowercase , skip_special_tokens=__lowercase ) lowerCAmelCase_ : List[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowercase ) self.assertEqual(__lowercase , __lowercase ) self.assertNotIn(self.tokenizer.eos_token , __lowercase ) def lowercase_ ( self ) -> Any: lowerCAmelCase_ : Union[str, Any] = ['''this is gunna be a long sentence ''' * 2_0] assert isinstance(src_text[0] , __lowercase ) lowerCAmelCase_ : str = 1_0 lowerCAmelCase_ : Tuple = self.tokenizer(__lowercase , max_length=__lowercase , truncation=__lowercase ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , __lowercase ) self.assertEqual(len(__lowercase ) , __lowercase ) def lowercase_ ( self ) -> int: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [2_5_0_0_2_6, 2_5_0_0_0_1] ) def lowercase_ ( self ) -> Dict: lowerCAmelCase_ : Any = tempfile.mkdtemp() lowerCAmelCase_ : int = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(__lowercase ) lowerCAmelCase_ : Optional[Any] = MBartTokenizer.from_pretrained(__lowercase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowercase ) @require_torch def lowercase_ ( self ) -> Union[str, Any]: lowerCAmelCase_ : Tuple = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__lowercase , return_tensors='''pt''' ) lowerCAmelCase_ : Tuple = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def lowercase_ ( self ) -> List[Any]: lowerCAmelCase_ : str = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=__lowercase , truncation=__lowercase , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , ) lowerCAmelCase_ : int = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) self.assertIsInstance(__lowercase , __lowercase ) self.assertEqual((2, 1_4) , batch.input_ids.shape ) self.assertEqual((2, 1_4) , batch.attention_mask.shape ) lowerCAmelCase_ : str = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , __lowercase ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] ) def lowercase_ ( self ) -> Optional[int]: lowerCAmelCase_ : Optional[Any] = self.tokenizer(self.src_text , padding=__lowercase , truncation=__lowercase , max_length=3 , return_tensors='''pt''' ) lowerCAmelCase_ : Any = self.tokenizer( text_target=self.tgt_text , padding=__lowercase , truncation=__lowercase , max_length=1_0 , return_tensors='''pt''' ) lowerCAmelCase_ : int = targets['''input_ids'''] lowerCAmelCase_ : Optional[Any] = shift_tokens_right(__lowercase , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 ) @require_torch def lowercase_ ( self ) -> List[str]: lowerCAmelCase_ : Any = self.tokenizer._build_translation_inputs( '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' ) self.assertEqual( nested_simplify(__lowercase ) , { # A, test, EOS, en_XX '''input_ids''': [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 2_5_0_0_0_1, } , )
262
1
'''simple docstring''' from math import factorial _SCREAMING_SNAKE_CASE = {str(d): factorial(d) for d in range(1_0)} def _lowerCAmelCase ( lowerCamelCase_ : int ): return sum(DIGIT_FACTORIAL[d] for d in str(lowerCamelCase_ ) ) def _lowerCAmelCase ( ): __lowercase = 7 * factorial(9 ) + 1 return sum(i for i in range(3 , lowerCamelCase_ ) if sum_of_digit_factorial(lowerCamelCase_ ) == i ) if __name__ == "__main__": print(f'''{solution() = }''')
217
'''simple docstring''' # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401 deprecate( '''stable diffusion controlnet''', '''0.22.0''', '''Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.''', standard_warn=False, stacklevel=3, )
217
1
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available from .timesteps import ( fastaa_timesteps, smartaa_timesteps, smartaa_timesteps, smartaaa_timesteps, smartaaa_timesteps, superaa_timesteps, superaa_timesteps, superaaa_timesteps, ) @dataclass class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_if import IFPipeline from .pipeline_if_imgaimg import IFImgaImgPipeline from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker
91
import os import unittest from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class _lowercase ( A__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = TransfoXLTokenizer SCREAMING_SNAKE_CASE__ : int = False SCREAMING_SNAKE_CASE__ : Optional[Any] = False def __magic_name__( self :str ) -> Dict: super().setUp() __SCREAMING_SNAKE_CASE : List[str] = [ '''<unk>''', '''[CLS]''', '''[SEP]''', '''want''', '''unwanted''', '''wa''', '''un''', '''running''', ''',''', '''low''', '''l''', ] __SCREAMING_SNAKE_CASE : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def __magic_name__( self :Any , **lowerCAmelCase__ :int ) -> str: __SCREAMING_SNAKE_CASE : Optional[Any] = True return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :List[Any] ) -> Tuple: __SCREAMING_SNAKE_CASE : Dict = '''<unk> UNwanted , running''' __SCREAMING_SNAKE_CASE : List[str] = '''<unk> unwanted, running''' return input_text, output_text def __magic_name__( self :Any ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE : int = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : int = tokenizer.tokenize('''<unk> UNwanted , running''' ) self.assertListEqual(lowerCAmelCase__ , ['''<unk>''', '''unwanted''', ''',''', '''running'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [0, 4, 8, 7] ) def __magic_name__( self :Tuple ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE : Optional[int] = TransfoXLTokenizer(lower_case=lowerCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) def __magic_name__( self :Tuple ) -> List[Any]: __SCREAMING_SNAKE_CASE : Union[str, Any] = TransfoXLTokenizer(lower_case=lowerCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def __magic_name__( self :Dict ) -> List[Any]: __SCREAMING_SNAKE_CASE : List[str] = TransfoXLTokenizer(lower_case=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[Any] = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?''' __SCREAMING_SNAKE_CASE : Optional[int] = [ '''Hello''', '''(''', '''bracket''', ''')''', '''and''', '''side''', '''@-@''', '''scrolled''', '''[''', '''and''', ''']''', '''Henry''', '''\'s''', '''$''', '''5''', '''@,@''', '''000''', '''with''', '''3''', '''@.@''', '''34''', '''m''', '''.''', '''What''', '''\'s''', '''up''', '''!''', '''?''', ] self.assertListEqual(tokenizer.tokenize(lowerCAmelCase__ ) , lowerCAmelCase__ ) self.assertEqual(tokenizer.convert_tokens_to_string(lowerCAmelCase__ ) , lowerCAmelCase__ ) def __magic_name__( self :str ) -> int: __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Any = len(lowerCAmelCase__ ) tokenizer.add_tokens(['''new1''', '''new2'''] ) tokenizer.move_added_token('''new1''' , 1 ) # Check that moved token is not copied (duplicate) self.assertEqual(len(lowerCAmelCase__ ) , original_len + 2 ) # Check that token is moved to specified id self.assertEqual(tokenizer.encode('''new1''' ) , [1] ) self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
9
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule __A = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]} if TYPE_CHECKING: from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
364
"""simple docstring""" import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __A = get_tests_dir("fixtures/spiece.model") @require_sentencepiece @require_tokenizers class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Dict = DebertaVaTokenizer _UpperCAmelCase :Tuple = DebertaVaTokenizerFast _UpperCAmelCase :int = True _UpperCAmelCase :int = True def _snake_case ( self ): super().setUp() # We have a SentencePiece fixture for testing lowercase__: List[Any] = DebertaVaTokenizer(_UpperCAmelCase , unk_token='''<unk>''' ) tokenizer.save_pretrained(self.tmpdirname ) def _snake_case ( self , _UpperCAmelCase ): lowercase__: List[str] = '''this is a test''' lowercase__: int = '''this is a test''' return input_text, output_text def _snake_case ( self ): lowercase__: Optional[int] = '''<pad>''' lowercase__: Optional[int] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase ) def _snake_case ( self ): lowercase__: Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''[PAD]''' ) self.assertEqual(len(_UpperCAmelCase ) , 30001 ) def _snake_case ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 30000 ) def _snake_case ( self ): # fmt: off lowercase__: int = ''' \tHeLLo!how \n Are yoU? ''' lowercase__: List[str] = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?'''] # fmt: on lowercase__: Any = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase ) lowercase__: Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase ) lowercase__: Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def _snake_case ( self ): pass @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def _snake_case ( self ): pass def _snake_case ( self ): # fmt: off lowercase__: Dict = '''I was born in 92000, and this is falsé.''' lowercase__: str = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__: Union[str, Any] = DebertaVaTokenizer(_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Tuple = DebertaVaTokenizerFast(_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): # fmt: off lowercase__: Any = '''I was born in 92000, and this is falsé.''' lowercase__: str = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[int] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): # fmt: off lowercase__: List[str] = '''I was born in 92000, and this is falsé.''' lowercase__: List[str] = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on lowercase__: Union[str, Any] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: int = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): # fmt: off lowercase__: Union[str, Any] = '''I was born in 92000, and this is falsé.''' lowercase__: int = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Union[str, Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): # fmt: off lowercase__: Optional[int] = ''' \tHeLLo!how \n Are yoU? ''' lowercase__: str = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?'''] # fmt: on lowercase__: Dict = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase ) lowercase__: List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): lowercase__: int = self.get_tokenizer() lowercase__: List[Any] = self.get_rust_tokenizer() lowercase__: List[str] = '''I was born in 92000, and this is falsé.''' lowercase__: Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) lowercase__: List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Dict = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) lowercase__: Tuple = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Any = self.get_rust_tokenizer() lowercase__: str = tokenizer.encode(_UpperCAmelCase ) lowercase__: Any = rust_tokenizer.encode(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): lowercase__: Optional[Any] = '''This is a test''' lowercase__: str = [13, 1, 4398, 25, 21, 1289] lowercase__: List[Any] = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test'''] lowercase__: Any = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test'''] lowercase__: int = DebertaVaTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase ) lowercase__: int = DebertaVaTokenizerFast(_UpperCAmelCase , keep_accents=_UpperCAmelCase ) lowercase__: Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: str = tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Any = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Union[str, Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: List[Any] = rust_tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: str = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) # fmt: off lowercase__: str = '''I was born in 92000, and this is falsé.''' lowercase__: Dict = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] lowercase__: Tuple = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ] lowercase__: Dict = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on lowercase__: Optional[Any] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Dict = tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[Any] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: List[Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Dict = rust_tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Optional[Any] = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self ): lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase ) lowercase__: Optional[int] = tokenizer.encode('''sequence builders''' ) lowercase__: Optional[Any] = tokenizer.encode('''multi-sequence build''' ) lowercase__: Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase ) lowercase__: Dict = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _UpperCAmelCase ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _UpperCAmelCase , ) @slow def _snake_case ( self ): # fmt: off lowercase__: List[Any] = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_UpperCAmelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
2
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = { 'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'], 'tokenization_electra': ['ElectraTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['ElectraTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ 'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'ElectraForCausalLM', 'ElectraForMaskedLM', 'ElectraForMultipleChoice', 'ElectraForPreTraining', 'ElectraForQuestionAnswering', 'ElectraForSequenceClassification', 'ElectraForTokenClassification', 'ElectraModel', 'ElectraPreTrainedModel', 'load_tf_weights_in_electra', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ 'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFElectraForMaskedLM', 'TFElectraForMultipleChoice', 'TFElectraForPreTraining', 'TFElectraForQuestionAnswering', 'TFElectraForSequenceClassification', 'TFElectraForTokenClassification', 'TFElectraModel', 'TFElectraPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ 'FlaxElectraForCausalLM', 'FlaxElectraForMaskedLM', 'FlaxElectraForMultipleChoice', 'FlaxElectraForPreTraining', 'FlaxElectraForQuestionAnswering', 'FlaxElectraForSequenceClassification', 'FlaxElectraForTokenClassification', 'FlaxElectraModel', 'FlaxElectraPreTrainedModel', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
11
def lowerCamelCase__ (_UpperCAmelCase): def merge(_UpperCAmelCase , _UpperCAmelCase) -> list: def _merge(): while left and right: yield (left if left[0] <= right[0] else right).pop(0) yield from left yield from right return list(_merge()) if len(_UpperCAmelCase) <= 1: return collection SCREAMING_SNAKE_CASE = len(_UpperCAmelCase) // 2 return merge(merge_sort(collection[:mid]) , merge_sort(collection[mid:])) if __name__ == "__main__": import doctest doctest.testmod() a_ : Tuple = input('Enter numbers separated by a comma:\n').strip() a_ : Optional[Any] = [int(item) for item in user_input.split(',')] print(*merge_sort(unsorted), sep=',')
137
0
import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class _a : """simple docstring""" def __init__( self : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : str=100 , UpperCAmelCase : Optional[int]=13 , UpperCAmelCase : Dict=30 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : str=3 , UpperCAmelCase : int=True , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : Optional[Any]=32 , UpperCAmelCase : Tuple=4 , UpperCAmelCase : int=4 , UpperCAmelCase : Any=37 , UpperCAmelCase : Optional[int]="gelu" , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : Dict=10 , UpperCAmelCase : Dict=0.02 , UpperCAmelCase : Optional[Any]=3 , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Any=[0, 1, 2, 3] , ): A_ = parent A_ = 100 A_ = batch_size A_ = image_size A_ = patch_size A_ = num_channels A_ = is_training A_ = use_labels A_ = hidden_size A_ = num_hidden_layers A_ = num_attention_heads A_ = intermediate_size A_ = hidden_act A_ = hidden_dropout_prob A_ = attention_probs_dropout_prob A_ = type_sequence_label_size A_ = initializer_range A_ = scope A_ = out_indices A_ = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) A_ = (image_size // patch_size) ** 2 A_ = num_patches + 1 def __A ( self : int ): A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ = None A_ = None if self.use_labels: A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) A_ = self.get_config() return config, pixel_values, labels, pixel_labels def __A ( self : Dict ): return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def __A ( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] ): A_ = BeitModel(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self : int , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] ): A_ = BeitForMaskedImageModeling(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def __A ( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] ): A_ = self.type_sequence_label_size A_ = BeitForImageClassification(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() A_ = model(UpperCAmelCase , labels=UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A_ = 1 A_ = BeitForImageClassification(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() A_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A_ = model(UpperCAmelCase , labels=UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __A ( self : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] ): A_ = self.num_labels A_ = BeitForSemanticSegmentation(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() A_ = model(UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) A_ = model(UpperCAmelCase , labels=UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def __A ( self : Dict ): A_ = self.prepare_config_and_inputs() A_ , A_ , A_ , A_ = config_and_inputs A_ = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _a ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : Optional[Any] = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) _lowerCamelCase : int = ( { 'feature-extraction': BeitModel, 'image-classification': BeitForImageClassification, 'image-segmentation': BeitForSemanticSegmentation, } if is_torch_available() else {} ) _lowerCamelCase : Optional[int] = False _lowerCamelCase : List[str] = False _lowerCamelCase : Dict = False def __A ( self : str ): A_ = BeitModelTester(self ) A_ = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=37 ) def __A ( self : int ): self.config_tester.run_common_tests() @unittest.skip(reason="BEiT does not use inputs_embeds" ) def __A ( self : Optional[int] ): pass @require_torch_multi_gpu @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def __A ( self : Union[str, Any] ): pass def __A ( self : int ): A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ = model_class(UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) ) def __A ( self : Union[str, Any] ): A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ = model_class(UpperCAmelCase ) A_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ = [*signature.parameters.keys()] A_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCAmelCase ) def __A ( self : Tuple ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase ) def __A ( self : Dict ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase ) def __A ( self : Tuple ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase ) def __A ( self : Any ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*UpperCAmelCase ) def __A ( self : Union[str, Any] ): if not self.model_tester.is_training: return A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() A_ = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(UpperCAmelCase ), BeitForMaskedImageModeling]: continue A_ = model_class(UpperCAmelCase ) model.to(UpperCAmelCase ) model.train() A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase ) A_ = model(**UpperCAmelCase ).loss loss.backward() def __A ( self : List[Any] ): A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return A_ = False A_ = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(UpperCAmelCase ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue A_ = model_class(UpperCAmelCase ) model.gradient_checkpointing_enable() model.to(UpperCAmelCase ) model.train() A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase ) A_ = model(**UpperCAmelCase ).loss loss.backward() def __A ( self : Optional[Any] ): A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() A_ = _config_zero_init(UpperCAmelCase ) for model_class in self.all_model_classes: A_ = model_class(config=UpperCAmelCase ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @slow def __A ( self : Dict ): for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ = BeitModel.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) def __snake_case ( ): """simple docstring""" A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _a ( unittest.TestCase ): """simple docstring""" @cached_property def __A ( self : Optional[int] ): return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def __A ( self : Dict ): A_ = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(UpperCAmelCase ) A_ = self.default_image_processor A_ = prepare_img() A_ = image_processor(images=UpperCAmelCase , return_tensors="pt" ).pixel_values.to(UpperCAmelCase ) # prepare bool_masked_pos A_ = torch.ones((1, 196) , dtype=torch.bool ).to(UpperCAmelCase ) # forward pass with torch.no_grad(): A_ = model(pixel_values=UpperCAmelCase , bool_masked_pos=UpperCAmelCase ) A_ = outputs.logits # verify the logits A_ = torch.Size((1, 196, 8192) ) self.assertEqual(logits.shape , UpperCAmelCase ) A_ = torch.tensor( [[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(UpperCAmelCase ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , UpperCAmelCase , atol=1E-2 ) ) @slow def __A ( self : List[str] ): A_ = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(UpperCAmelCase ) A_ = self.default_image_processor A_ = prepare_img() A_ = image_processor(images=UpperCAmelCase , return_tensors="pt" ).to(UpperCAmelCase ) # forward pass with torch.no_grad(): A_ = model(**UpperCAmelCase ) A_ = outputs.logits # verify the logits A_ = torch.Size((1, 1000) ) self.assertEqual(logits.shape , UpperCAmelCase ) A_ = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(UpperCAmelCase ) self.assertTrue(torch.allclose(logits[0, :3] , UpperCAmelCase , atol=1E-4 ) ) A_ = 281 self.assertEqual(logits.argmax(-1 ).item() , UpperCAmelCase ) @slow def __A ( self : Any ): A_ = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to( UpperCAmelCase ) A_ = self.default_image_processor A_ = prepare_img() A_ = image_processor(images=UpperCAmelCase , return_tensors="pt" ).to(UpperCAmelCase ) # forward pass with torch.no_grad(): A_ = model(**UpperCAmelCase ) A_ = outputs.logits # verify the logits A_ = torch.Size((1, 21841) ) self.assertEqual(logits.shape , UpperCAmelCase ) A_ = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(UpperCAmelCase ) self.assertTrue(torch.allclose(logits[0, :3] , UpperCAmelCase , atol=1E-4 ) ) A_ = 2396 self.assertEqual(logits.argmax(-1 ).item() , UpperCAmelCase ) @slow def __A ( self : Any ): A_ = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) A_ = model.to(UpperCAmelCase ) A_ = BeitImageProcessor(do_resize=UpperCAmelCase , size=640 , do_center_crop=UpperCAmelCase ) A_ = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) A_ = Image.open(ds[0]["file"] ) A_ = image_processor(images=UpperCAmelCase , return_tensors="pt" ).to(UpperCAmelCase ) # forward pass with torch.no_grad(): A_ = model(**UpperCAmelCase ) A_ = outputs.logits # verify the logits A_ = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape , UpperCAmelCase ) A_ = version.parse(PIL.__version__ ) < version.parse("9.0.0" ) if is_pillow_less_than_a: A_ = torch.tensor( [ [[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]], [[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]], [[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]], ] , device=UpperCAmelCase , ) else: A_ = torch.tensor( [ [[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]], [[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]], [[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]], ] , device=UpperCAmelCase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCAmelCase , atol=1E-4 ) ) @slow def __A ( self : Optional[Any] ): A_ = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) A_ = model.to(UpperCAmelCase ) A_ = BeitImageProcessor(do_resize=UpperCAmelCase , size=640 , do_center_crop=UpperCAmelCase ) A_ = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) A_ = Image.open(ds[0]["file"] ) A_ = image_processor(images=UpperCAmelCase , return_tensors="pt" ).to(UpperCAmelCase ) # forward pass with torch.no_grad(): A_ = model(**UpperCAmelCase ) A_ = outputs.logits.detach().cpu() A_ = image_processor.post_process_semantic_segmentation(outputs=UpperCAmelCase , target_sizes=[(500, 300)] ) A_ = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , UpperCAmelCase ) A_ = image_processor.post_process_semantic_segmentation(outputs=UpperCAmelCase ) A_ = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape , UpperCAmelCase )
329
import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def __snake_case ( __UpperCamelCase : Union[str, Any] ): """simple docstring""" if is_torch_version("<" ,"2.0.0" ) or not hasattr(__UpperCamelCase ,"_dynamo" ): return False return isinstance(__UpperCamelCase ,torch._dynamo.eval_frame.OptimizedModule ) def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : bool = True ): """simple docstring""" A_ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) A_ = is_compiled_module(__UpperCamelCase ) if is_compiled: A_ = model A_ = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = model.module if not keep_fpaa_wrapper: A_ = getattr(__UpperCamelCase ,"forward" ) A_ = model.__dict__.pop("_original_forward" ,__UpperCamelCase ) if original_forward is not None: while hasattr(__UpperCamelCase ,"__wrapped__" ): A_ = forward.__wrapped__ if forward == original_forward: break A_ = forward if getattr(__UpperCamelCase ,"_converted_to_transformer_engine" ,__UpperCamelCase ): convert_model(__UpperCamelCase ,to_transformer_engine=__UpperCamelCase ) if is_compiled: A_ = model A_ = compiled_model return model def __snake_case ( ): """simple docstring""" PartialState().wait_for_everyone() def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Any ): """simple docstring""" if PartialState().distributed_type == DistributedType.TPU: xm.save(__UpperCamelCase ,__UpperCamelCase ) elif PartialState().local_process_index == 0: torch.save(__UpperCamelCase ,__UpperCamelCase ) @contextmanager def __snake_case ( **__UpperCamelCase : Any ): """simple docstring""" for key, value in kwargs.items(): A_ = str(__UpperCamelCase ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def __snake_case ( __UpperCamelCase : Optional[Any] ): """simple docstring""" if not hasattr(__UpperCamelCase ,"__qualname__" ) and not hasattr(__UpperCamelCase ,"__name__" ): A_ = getattr(__UpperCamelCase ,"__class__" ,__UpperCamelCase ) if hasattr(__UpperCamelCase ,"__qualname__" ): return obj.__qualname__ if hasattr(__UpperCamelCase ,"__name__" ): return obj.__name__ return str(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[Any] ): """simple docstring""" for key, value in source.items(): if isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = destination.setdefault(__UpperCamelCase ,{} ) merge_dicts(__UpperCamelCase ,__UpperCamelCase ) else: A_ = value return destination def __snake_case ( __UpperCamelCase : int = None ): """simple docstring""" if port is None: A_ = 2_9500 with socket.socket(socket.AF_INET ,socket.SOCK_STREAM ) as s: return s.connect_ex(("localhost", port) ) == 0
329
1
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __A( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = StableDiffusionSAGPipeline SCREAMING_SNAKE_CASE__ = TEXT_TO_IMAGE_PARAMS SCREAMING_SNAKE_CASE__ = TEXT_TO_IMAGE_BATCH_PARAMS SCREAMING_SNAKE_CASE__ = TEXT_TO_IMAGE_IMAGE_PARAMS SCREAMING_SNAKE_CASE__ = TEXT_TO_IMAGE_IMAGE_PARAMS SCREAMING_SNAKE_CASE__ = False def UpperCAmelCase_ (self ): torch.manual_seed(0 ) UpperCamelCase__ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) UpperCamelCase__ = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=SCREAMING_SNAKE_CASE_ , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , ) torch.manual_seed(0 ) UpperCamelCase__ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) UpperCamelCase__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) UpperCamelCase__ = CLIPTextModel(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) UpperCamelCase__ = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ): if str(SCREAMING_SNAKE_CASE_ ).startswith("""mps""" ): UpperCamelCase__ = torch.manual_seed(SCREAMING_SNAKE_CASE_ ) else: UpperCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = { """prompt""": """.""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 1.0, """sag_scale""": 1.0, """output_type""": """numpy""", } return inputs def UpperCAmelCase_ (self ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class __A( unittest.TestCase ): """simple docstring""" def UpperCAmelCase_ (self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ (self ): UpperCamelCase__ = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" ) UpperCamelCase__ = sag_pipe.to(SCREAMING_SNAKE_CASE_ ) sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = """.""" UpperCamelCase__ = torch.manual_seed(0 ) UpperCamelCase__ = sag_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" ) UpperCamelCase__ = output.images UpperCamelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCamelCase__ = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2 def UpperCAmelCase_ (self ): UpperCamelCase__ = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) UpperCamelCase__ = sag_pipe.to(SCREAMING_SNAKE_CASE_ ) sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = """.""" UpperCamelCase__ = torch.manual_seed(0 ) UpperCamelCase__ = sag_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" ) UpperCamelCase__ = output.images UpperCamelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCamelCase__ = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2 def UpperCAmelCase_ (self ): UpperCamelCase__ = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) UpperCamelCase__ = sag_pipe.to(SCREAMING_SNAKE_CASE_ ) sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = """.""" UpperCamelCase__ = torch.manual_seed(0 ) UpperCamelCase__ = sag_pipe( [prompt] , width=7_68 , height=5_12 , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , ) UpperCamelCase__ = output.images assert image.shape == (1, 5_12, 7_68, 3)
244
import fire from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoTokenizer from utils import SeqaSeqDataset, pickle_save def __magic_name__ ( __a : Optional[int] , __a : Union[str, Any] , __a : Union[str, Any]=1_024 , __a : str=1_024 , __a : Optional[Any]=False , **__a : Tuple ): '''simple docstring''' UpperCamelCase__ = AutoTokenizer.from_pretrained(__a ) UpperCamelCase__ = SeqaSeqDataset(__a , __a , __a , __a , type_path="""train""" , **__a ) UpperCamelCase__ = tok.pad_token_id def get_lens(__a : Optional[int] ): UpperCamelCase__ = tqdm( DataLoader(__a , batch_size=512 , num_workers=8 , shuffle=__a , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , ) UpperCamelCase__ = [] for batch in dl: UpperCamelCase__ = batch["""input_ids"""].ne(__a ).sum(1 ).tolist() UpperCamelCase__ = batch["""labels"""].ne(__a ).sum(1 ).tolist() if consider_target: for src, tgt in zip(__a , __a ): max_lens.append(max(__a , __a ) ) else: max_lens.extend(__a ) return max_lens UpperCamelCase__ = get_lens(__a ) UpperCamelCase__ = SeqaSeqDataset(__a , __a , __a , __a , type_path="""val""" , **__a ) UpperCamelCase__ = get_lens(__a ) pickle_save(__a , train_ds.len_file ) pickle_save(__a , val_ds.len_file ) if __name__ == "__main__": fire.Fire(save_len_file)
244
1
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ): __lowerCamelCase = StableDiffusionInstructPixaPixPipeline __lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'} __lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS __lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS __lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS def UpperCAmelCase ( self :str ): '''simple docstring''' torch.manual_seed(0 ) lowercase__ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) lowercase__ = PNDMScheduler(skip_prk_steps=_lowercase ) torch.manual_seed(0 ) lowercase__ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) lowercase__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) lowercase__ = CLIPTextModel(_lowercase ) lowercase__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) lowercase__ = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def UpperCAmelCase ( self :Optional[int] , _lowercase :List[str] , _lowercase :str=0 ): '''simple docstring''' lowercase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase ) ).to(_lowercase ) lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowercase__ = Image.fromarray(np.uinta(_lowercase ) ).convert("RGB" ) if str(_lowercase ).startswith("mps" ): lowercase__ = torch.manual_seed(_lowercase ) else: lowercase__ = torch.Generator(device=_lowercase ).manual_seed(_lowercase ) lowercase__ = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "image_guidance_scale": 1, "output_type": "numpy", } return inputs def UpperCAmelCase ( self :Optional[Any] ): '''simple docstring''' lowercase__ = "cpu" # ensure determinism for the device-dependent torch.Generator lowercase__ = self.get_dummy_components() lowercase__ = StableDiffusionInstructPixaPixPipeline(**_lowercase ) lowercase__ = sd_pipe.to(_lowercase ) sd_pipe.set_progress_bar_config(disable=_lowercase ) lowercase__ = self.get_dummy_inputs(_lowercase ) lowercase__ = sd_pipe(**_lowercase ).images lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowercase__ = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCAmelCase ( self :List[str] ): '''simple docstring''' lowercase__ = "cpu" # ensure determinism for the device-dependent torch.Generator lowercase__ = self.get_dummy_components() lowercase__ = StableDiffusionInstructPixaPixPipeline(**_lowercase ) lowercase__ = sd_pipe.to(_lowercase ) sd_pipe.set_progress_bar_config(disable=_lowercase ) lowercase__ = self.get_dummy_inputs(_lowercase ) lowercase__ = "french fries" lowercase__ = sd_pipe(**_lowercase , negative_prompt=_lowercase ) lowercase__ = output.images lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowercase__ = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCAmelCase ( self :Dict ): '''simple docstring''' lowercase__ = "cpu" # ensure determinism for the device-dependent torch.Generator lowercase__ = self.get_dummy_components() lowercase__ = StableDiffusionInstructPixaPixPipeline(**_lowercase ) lowercase__ = sd_pipe.to(_lowercase ) sd_pipe.set_progress_bar_config(disable=_lowercase ) lowercase__ = self.get_dummy_inputs(_lowercase ) lowercase__ = [inputs["prompt"]] * 2 lowercase__ = np.array(inputs["image"] ).astype(np.floataa ) / 255.0 lowercase__ = torch.from_numpy(_lowercase ).unsqueeze(0 ).to(_lowercase ) lowercase__ = image / 2 + 0.5 lowercase__ = image.permute(0 , 3 , 1 , 2 ) lowercase__ = image.repeat(2 , 1 , 1 , 1 ) lowercase__ = sd_pipe(**_lowercase ).images lowercase__ = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) lowercase__ = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCAmelCase ( self :Union[str, Any] ): '''simple docstring''' lowercase__ = "cpu" # ensure determinism for the device-dependent torch.Generator lowercase__ = self.get_dummy_components() lowercase__ = EulerAncestralDiscreteScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" ) lowercase__ = StableDiffusionInstructPixaPixPipeline(**_lowercase ) lowercase__ = sd_pipe.to(_lowercase ) sd_pipe.set_progress_bar_config(disable=_lowercase ) lowercase__ = self.get_dummy_inputs(_lowercase ) lowercase__ = sd_pipe(**_lowercase ).images lowercase__ = image[0, -3:, -3:, -1] lowercase__ = [round(_lowercase , 4 ) for x in image_slice.flatten().tolist()] print(",".join([str(_lowercase ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) lowercase__ = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCAmelCase ( self :int ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def UpperCAmelCase ( self :Union[str, Any] ): '''simple docstring''' lowercase__ = self.get_dummy_components() lowercase__ = StableDiffusionInstructPixaPixPipeline(**_lowercase ) lowercase__ = VaeImageProcessor(do_resize=_lowercase , do_normalize=_lowercase ) lowercase__ = pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) lowercase__ = pipe(**self.get_dummy_inputs_by_type(_lowercase , input_image_type="pt" ) )[0] lowercase__ = components["vae"] lowercase__ = self.get_dummy_inputs_by_type(_lowercase , input_image_type="pt" ) for image_param in self.image_latents_params: if image_param in inputs.keys(): lowercase__ = vae.encode(inputs[image_param] ).latent_dist.mode() lowercase__ = pipe(**_lowercase )[0] lowercase__ = np.abs(out - out_latents_inputs ).max() self.assertLess(_lowercase , 1e-4 , "passing latents as image input generate different result from passing image" ) @slow @require_torch_gpu class lowerCAmelCase ( unittest.TestCase ): def UpperCAmelCase ( self :Union[str, Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase ( self :Optional[Any] , _lowercase :Optional[int]=0 ): '''simple docstring''' lowercase__ = torch.manual_seed(_lowercase ) lowercase__ = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" ) lowercase__ = { "prompt": "turn him into a cyborg", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "image_guidance_scale": 1.0, "output_type": "numpy", } return inputs def UpperCAmelCase ( self :str ): '''simple docstring''' lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=_lowercase ) pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) pipe.enable_attention_slicing() lowercase__ = self.get_inputs() lowercase__ = pipe(**_lowercase ).images lowercase__ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_12, 5_12, 3) lowercase__ = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def UpperCAmelCase ( self :List[Any] ): '''simple docstring''' lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=_lowercase ) lowercase__ = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) pipe.enable_attention_slicing() lowercase__ = self.get_inputs() lowercase__ = pipe(**_lowercase ).images lowercase__ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_12, 5_12, 3) lowercase__ = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def UpperCAmelCase ( self :str ): '''simple docstring''' lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=_lowercase ) lowercase__ = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) pipe.enable_attention_slicing() lowercase__ = self.get_inputs() lowercase__ = pipe(**_lowercase ).images lowercase__ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_12, 5_12, 3) lowercase__ = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def UpperCAmelCase ( self :List[Any] ): '''simple docstring''' lowercase__ = 0 def callback_fn(_lowercase :int , _lowercase :int , _lowercase :torch.FloatTensor ) -> None: lowercase__ = True nonlocal number_of_steps number_of_steps += 1 if step == 1: lowercase__ = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) lowercase__ = latents[0, -3:, -3:, -1] lowercase__ = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: lowercase__ = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) lowercase__ = latents[0, -3:, -3:, -1] lowercase__ = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 lowercase__ = False lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=_lowercase , torch_dtype=torch.floataa ) lowercase__ = pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) pipe.enable_attention_slicing() lowercase__ = self.get_inputs() pipe(**_lowercase , callback=_lowercase , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def UpperCAmelCase ( self :Dict ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=_lowercase , torch_dtype=torch.floataa ) lowercase__ = pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() lowercase__ = self.get_inputs() lowercase__ = pipe(**_lowercase ) lowercase__ = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def UpperCAmelCase ( self :Optional[int] ): '''simple docstring''' lowercase__ = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 lowercase__ = inputs["image"].resize((5_04, 5_04) ) lowercase__ = "timbrooks/instruct-pix2pix" lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained( _lowercase , safety_checker=_lowercase , ) pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) pipe.enable_attention_slicing() lowercase__ = pipe(**_lowercase ) lowercase__ = output.images[0] lowercase__ = image[2_55:2_58, 3_83:3_86, -1] assert image.shape == (5_04, 5_04, 3) lowercase__ = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
201
import argparse import os import re import torch from flax.traverse_util import flatten_dict from tax import checkpoints from transformers import ( AutoTokenizer, PixaStructConfig, PixaStructForConditionalGeneration, PixaStructImageProcessor, PixaStructProcessor, PixaStructTextConfig, PixaStructVisionConfig, ) def _A ( __magic_name__ ): lowercase__ = checkpoints.load_tax_checkpoint(__magic_name__ ) lowercase__ = flatten_dict(__magic_name__ ) return flax_params def _A ( __magic_name__ ): lowercase__ = {} lowercase__ = { "token_embedder": "embeddings", "encoder_norm": "layernorm", "kernel": "weight", ".out": ".output", "scale": "weight", "embedders_0.pos_embedding": "row_embedder.weight", "embedders_1.pos_embedding": "column_embedder.weight", } lowercase__ = { "query": "attention.query", "key": "attention.key", "value": "attention.value", "output.dense": "output", "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o", "pre_self_attention_layer_norm": "self_attention.layer_norm", "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm", "mlp.": "mlp.DenseReluDense.", "pre_mlp_layer_norm": "mlp.layer_norm", "self_attention.o": "self_attention.attention.o", "decoder.embeddings.embedding": "decoder.embed_tokens.weight", "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight", "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight", "decoder.logits_dense.weight": "decoder.lm_head.weight", } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key lowercase__ = ".".join(key[1:] ) # rename the key for old, new in CONVERSION_MAPPING.items(): lowercase__ = new_key.replace(__magic_name__ , __magic_name__ ) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): lowercase__ = new_key.replace(__magic_name__ , __magic_name__ ) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number lowercase__ = re.sub(R"layers_(\d+)" , R"layer.\1" , __magic_name__ ) lowercase__ = new_key.replace("encoder" , "encoder.encoder" ) elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number lowercase__ = re.sub(R"layers_(\d+)" , R"layer.\1" , __magic_name__ ) lowercase__ = flax_dict[key] lowercase__ = {} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): lowercase__ = torch.from_numpy(converted_dict[key].T ) else: lowercase__ = torch.from_numpy(converted_dict[key] ) return converted_torch_dict def _A ( __magic_name__ , __magic_name__ , __magic_name__=False , __magic_name__=False ): lowercase__ = get_flax_param(__magic_name__ ) if not use_large: lowercase__ = PixaStructVisionConfig() lowercase__ = PixaStructTextConfig() else: lowercase__ = PixaStructVisionConfig( hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 ) lowercase__ = PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 ) lowercase__ = PixaStructConfig( vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=__magic_name__ ) lowercase__ = PixaStructForConditionalGeneration(__magic_name__ ) lowercase__ = rename_and_convert_flax_params(__magic_name__ ) model.load_state_dict(__magic_name__ ) lowercase__ = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer" ) lowercase__ = PixaStructImageProcessor() lowercase__ = PixaStructProcessor(image_processor=__magic_name__ , tokenizer=__magic_name__ ) if use_large: lowercase__ = 4096 lowercase__ = True # mkdir if needed os.makedirs(__magic_name__ , exist_ok=__magic_name__ ) model.save_pretrained(__magic_name__ ) processor.save_pretrained(__magic_name__ ) print("Model saved in {}".format(__magic_name__ ) ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""") parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""") parser.add_argument("""--is_vqa""", action="""store_true""", help="""Use large model.""") _snake_case = parser.parse_args() convert_pixastruct_original_pytorch_checkpoint_to_hf( args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large )
201
1
import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase): _lowercase : Dict = AutoencoderKL _lowercase : Tuple = """sample""" _lowercase : Optional[int] = 1E-2 @property def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' a__ : Union[str, Any] =4 a__ : List[str] =3 a__ : Union[str, Any] =(3_2, 3_2) a__ : int =floats_tensor((batch_size, num_channels) + sizes ).to(lowerCAmelCase__ ) return {"sample": image} @property def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' return (3, 3_2, 3_2) @property def _lowercase ( self ) -> List[Any]: '''simple docstring''' return (3, 3_2, 3_2) def _lowercase ( self ) -> Union[str, Any]: '''simple docstring''' a__ : List[Any] ={ "block_out_channels": [3_2, 6_4], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } a__ : List[str] =self.dummy_input return init_dict, inputs_dict def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' pass def _lowercase ( self ) -> List[Any]: '''simple docstring''' pass @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" ) def _lowercase ( self ) -> Optional[int]: '''simple docstring''' a__ , a__ : Union[str, Any] =self.prepare_init_args_and_inputs_for_common() a__ : Tuple =self.model_class(**lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) assert not model.is_gradient_checkpointing and model.training a__ : Optional[Any] =model(**lowerCAmelCase__ ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() a__ : str =torch.randn_like(lowerCAmelCase__ ) a__ : Tuple =(out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing a__ : Any =self.model_class(**lowerCAmelCase__ ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(lowerCAmelCase__ ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training a__ : Dict =model_a(**lowerCAmelCase__ ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() a__ : int =(out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1E-5 ) a__ : Union[str, Any] =dict(model.named_parameters() ) a__ : int =dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) ) def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' a__ , a__ : Optional[int] =AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(lowerCAmelCase__ ) a__ : Optional[Any] =model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def _lowercase ( self ) -> Optional[int]: '''simple docstring''' a__ : Tuple =AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" ) a__ : str =model.to(lowerCAmelCase__ ) model.eval() if torch_device == "mps": a__ : Any =torch.manual_seed(0 ) else: a__ : Union[str, Any] =torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 ) a__ : Union[str, Any] =torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) a__ : Union[str, Any] =image.to(lowerCAmelCase__ ) with torch.no_grad(): a__ : Tuple =model(lowerCAmelCase__ , sample_posterior=lowerCAmelCase__ , generator=lowerCAmelCase__ ).sample a__ : Any =output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": a__ : str =torch.tensor( [ -4.0078E-01, -3.8323E-04, -1.2681E-01, -1.1462E-01, 2.0095E-01, 1.0893E-01, -8.8247E-02, -3.0361E-01, -9.8644E-03, ] ) elif torch_device == "cpu": a__ : Dict =torch.tensor( [-0.13_52, 0.08_78, 0.04_19, -0.08_18, -0.10_69, 0.06_88, -0.14_58, -0.44_46, -0.00_26] ) else: a__ : Optional[int] =torch.tensor( [-0.24_21, 0.46_42, 0.25_07, -0.04_38, 0.06_82, 0.31_60, -0.20_18, -0.07_27, 0.24_85] ) self.assertTrue(torch_all_close(lowerCAmelCase__ , lowerCAmelCase__ , rtol=1E-2 ) ) @slow class __lowerCAmelCase ( unittest.TestCase): def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]: '''simple docstring''' return F'''gaussian_noise_s={seed}_shape={"_".join([str(lowerCAmelCase__ ) for s in shape] )}.npy''' def _lowercase ( self ) -> List[str]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self , lowerCAmelCase__=0 , lowerCAmelCase__=(4, 3, 5_1_2, 5_1_2) , lowerCAmelCase__=False ) -> Optional[Any]: '''simple docstring''' a__ : List[str] =torch.floataa if fpaa else torch.floataa a__ : Tuple =torch.from_numpy(load_hf_numpy(self.get_file_format(lowerCAmelCase__ , lowerCAmelCase__ ) ) ).to(lowerCAmelCase__ ).to(lowerCAmelCase__ ) return image def _lowercase ( self , lowerCAmelCase__="CompVis/stable-diffusion-v1-4" , lowerCAmelCase__=False ) -> Any: '''simple docstring''' a__ : str ="fp16" if fpaa else None a__ : str =torch.floataa if fpaa else torch.floataa a__ : Dict =AutoencoderKL.from_pretrained( lowerCAmelCase__ , subfolder="vae" , torch_dtype=lowerCAmelCase__ , revision=lowerCAmelCase__ , ) model.to(lowerCAmelCase__ ).eval() return model def _lowercase ( self , lowerCAmelCase__=0 ) -> Optional[int]: '''simple docstring''' if torch_device == "mps": return torch.manual_seed(lowerCAmelCase__ ) return torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ ) @parameterized.expand( [ # fmt: off [3_3, [-0.16_03, 0.98_78, -0.04_95, -0.07_90, -0.27_09, 0.83_75, -0.20_60, -0.08_24], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]], [4_7, [-0.23_76, 0.11_68, 0.13_32, -0.48_40, -0.25_08, -0.07_91, -0.04_93, -0.40_89], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]], # fmt: on ] ) def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]: '''simple docstring''' a__ : List[Any] =self.get_sd_vae_model() a__ : Tuple =self.get_sd_image(lowerCAmelCase__ ) a__ : Tuple =self.get_generator(lowerCAmelCase__ ) with torch.no_grad(): a__ : Tuple =model(lowerCAmelCase__ , generator=lowerCAmelCase__ , sample_posterior=lowerCAmelCase__ ).sample assert sample.shape == image.shape a__ : Tuple =sample[-1, -2:, -2:, :2].flatten().float().cpu() a__ : Tuple =torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(lowerCAmelCase__ , lowerCAmelCase__ , atol=3E-3 ) @parameterized.expand( [ # fmt: off [3_3, [-0.05_13, 0.02_89, 1.37_99, 0.21_66, -0.25_73, -0.08_71, 0.51_03, -0.09_99]], [4_7, [-0.41_28, -0.13_20, -0.37_04, 0.19_65, -0.41_16, -0.23_32, -0.33_40, 0.22_47]], # fmt: on ] ) @require_torch_gpu def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> int: '''simple docstring''' a__ : Optional[int] =self.get_sd_vae_model(fpaa=lowerCAmelCase__ ) a__ : Any =self.get_sd_image(lowerCAmelCase__ , fpaa=lowerCAmelCase__ ) a__ : Dict =self.get_generator(lowerCAmelCase__ ) with torch.no_grad(): a__ : Union[str, Any] =model(lowerCAmelCase__ , generator=lowerCAmelCase__ , sample_posterior=lowerCAmelCase__ ).sample assert sample.shape == image.shape a__ : int =sample[-1, -2:, :2, -2:].flatten().float().cpu() a__ : List[str] =torch.tensor(lowerCAmelCase__ ) assert torch_all_close(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-2 ) @parameterized.expand( [ # fmt: off [3_3, [-0.16_09, 0.98_66, -0.04_87, -0.07_77, -0.27_16, 0.83_68, -0.20_55, -0.08_14], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]], [4_7, [-0.23_77, 0.11_47, 0.13_33, -0.48_41, -0.25_06, -0.08_05, -0.04_91, -0.40_85], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]], # fmt: on ] ) def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]: '''simple docstring''' a__ : Any =self.get_sd_vae_model() a__ : Optional[int] =self.get_sd_image(lowerCAmelCase__ ) with torch.no_grad(): a__ : Dict =model(lowerCAmelCase__ ).sample assert sample.shape == image.shape a__ : List[str] =sample[-1, -2:, -2:, :2].flatten().float().cpu() a__ : List[str] =torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(lowerCAmelCase__ , lowerCAmelCase__ , atol=3E-3 ) @parameterized.expand( [ # fmt: off [1_3, [-0.20_51, -0.18_03, -0.23_11, -0.21_14, -0.32_92, -0.35_74, -0.29_53, -0.33_23]], [3_7, [-0.26_32, -0.26_25, -0.21_99, -0.27_41, -0.45_39, -0.49_90, -0.37_20, -0.49_25]], # fmt: on ] ) @require_torch_gpu def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]: '''simple docstring''' a__ : List[Any] =self.get_sd_vae_model() a__ : Optional[Any] =self.get_sd_image(lowerCAmelCase__ , shape=(3, 4, 6_4, 6_4) ) with torch.no_grad(): a__ : Optional[Any] =model.decode(lowerCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] a__ : List[Any] =sample[-1, -2:, :2, -2:].flatten().cpu() a__ : Dict =torch.tensor(lowerCAmelCase__ ) assert torch_all_close(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) @parameterized.expand( [ # fmt: off [2_7, [-0.03_69, 0.02_07, -0.07_76, -0.06_82, -0.17_47, -0.19_30, -0.14_65, -0.20_39]], [1_6, [-0.16_28, -0.21_34, -0.27_47, -0.26_42, -0.37_74, -0.44_04, -0.36_87, -0.42_77]], # fmt: on ] ) @require_torch_gpu def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> int: '''simple docstring''' a__ : Optional[Any] =self.get_sd_vae_model(fpaa=lowerCAmelCase__ ) a__ : Tuple =self.get_sd_image(lowerCAmelCase__ , shape=(3, 4, 6_4, 6_4) , fpaa=lowerCAmelCase__ ) with torch.no_grad(): a__ : Union[str, Any] =model.decode(lowerCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] a__ : Tuple =sample[-1, -2:, :2, -2:].flatten().float().cpu() a__ : Dict =torch.tensor(lowerCAmelCase__ ) assert torch_all_close(lowerCAmelCase__ , lowerCAmelCase__ , atol=5E-3 ) @parameterized.expand([(1_3,), (1_6,), (2_7,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def _lowercase ( self , lowerCAmelCase__ ) -> Dict: '''simple docstring''' a__ : int =self.get_sd_vae_model(fpaa=lowerCAmelCase__ ) a__ : List[Any] =self.get_sd_image(lowerCAmelCase__ , shape=(3, 4, 6_4, 6_4) , fpaa=lowerCAmelCase__ ) with torch.no_grad(): a__ : Optional[int] =model.decode(lowerCAmelCase__ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): a__ : Tuple =model.decode(lowerCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] assert torch_all_close(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-1 ) @parameterized.expand([(1_3,), (1_6,), (3_7,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def _lowercase ( self , lowerCAmelCase__ ) -> List[Any]: '''simple docstring''' a__ : Dict =self.get_sd_vae_model() a__ : str =self.get_sd_image(lowerCAmelCase__ , shape=(3, 4, 6_4, 6_4) ) with torch.no_grad(): a__ : List[str] =model.decode(lowerCAmelCase__ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): a__ : Optional[int] =model.decode(lowerCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] assert torch_all_close(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-2 ) @parameterized.expand( [ # fmt: off [3_3, [-0.30_01, 0.09_18, -2.69_84, -3.97_20, -3.20_99, -5.03_53, 1.73_38, -0.20_65, 3.42_67]], [4_7, [-1.50_30, -4.38_71, -6.03_55, -9.11_57, -1.66_61, -2.78_53, 2.16_07, -5.08_23, 2.56_33]], # fmt: on ] ) def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]: '''simple docstring''' a__ : Tuple =self.get_sd_vae_model() a__ : int =self.get_sd_image(lowerCAmelCase__ ) a__ : Any =self.get_generator(lowerCAmelCase__ ) with torch.no_grad(): a__ : str =model.encode(lowerCAmelCase__ ).latent_dist a__ : Union[str, Any] =dist.sample(generator=lowerCAmelCase__ ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] a__ : Optional[Any] =sample[0, -1, -3:, -3:].flatten().cpu() a__ : List[str] =torch.tensor(lowerCAmelCase__ ) a__ : Union[str, Any] =3E-3 if torch_device != "mps" else 1E-2 assert torch_all_close(lowerCAmelCase__ , lowerCAmelCase__ , atol=lowerCAmelCase__ )
95
import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class lowercase__( unittest.TestCase ): """simple docstring""" def _lowercase ( self : List[str] ) -> List[Any]: lowercase_ = 1_0 def _lowercase ( self : int ) -> List[str]: lowercase_ = [1, 2, 3, 4] lowercase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : int ) -> Optional[Any]: lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Union[str, Any] ) -> Optional[int]: lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3] lowercase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Any ) -> List[Any]: lowercase_ = '''It was the year of Our Lord one thousand seven hundred and seventy-five.\n\nSpiritual revelations were conceded to England at that favoured period, as at this.''' lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [] ) def _lowercase ( self : List[str] ) -> List[str]: lowercase_ = '''''' lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [] ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [] ) def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]: lowercase_ = ( '''It was the year of Our Lord one thousand seven hundred and ''' '''seventy-five\n\nSpiritual revelations were conceded to England ''' '''at that favoured period, as at this.\n@highlight\n\nIt was the best of times''' ) lowercase_ , lowercase_ = process_story(SCREAMING_SNAKE_CASE_ ) lowercase_ = [ '''It was the year of Our Lord one thousand seven hundred and seventy-five.''', '''Spiritual revelations were conceded to England at that favoured period, as at this.''', ] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = ['''It was the best of times.'''] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Union[str, Any] ) -> Optional[Any]: lowercase_ = torch.tensor([1, 2, 3, 4] ) lowercase_ = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 0 ).numpy() , expected.numpy() ) def _lowercase ( self : List[Any] ) -> Tuple: lowercase_ = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] ) lowercase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 2_3 ).numpy() , expected.numpy() ) def _lowercase ( self : int ) -> Dict: lowercase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) lowercase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 1 ).numpy() , expected.numpy() ) def _lowercase ( self : List[str] ) -> Tuple: lowercase_ = 1_0_1 lowercase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] ) lowercase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) lowercase_ = compute_token_type_ids(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) np.testing.assert_array_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
30
0
"""simple docstring""" # This is the module that test_patching.py uses to test patch_submodule() import os # noqa: this is just for tests import os as renamed_os # noqa: this is just for tests from os import path # noqa: this is just for tests from os import path as renamed_path # noqa: this is just for tests from os.path import join # noqa: this is just for tests from os.path import join as renamed_join # noqa: this is just for tests SCREAMING_SNAKE_CASE = open # noqa: we just need to have a builtin inside this module to test it properly
350
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class UpperCAmelCase_ ( A_ ): lowercase__ = ['''image_processor''', '''tokenizer'''] lowercase__ = '''ViTImageProcessor''' lowercase__ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self : Optional[Any] , snake_case_ : Union[str, Any]=None , snake_case_ : Dict=None , **snake_case_ : Optional[Any] ) -> List[str]: '''simple docstring''' A__ = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , snake_case_ , ) A__ = kwargs.pop("feature_extractor" ) A__ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(snake_case_ , snake_case_ ) def __call__( self : int , snake_case_ : Union[str, Any]=None , snake_case_ : int=None , snake_case_ : Dict=None , snake_case_ : int=None , **snake_case_ : List[str] ) -> Union[str, Any]: '''simple docstring''' if text is None and visual_prompt is None and images is None: raise ValueError("You have to specify either text, visual prompt or images." ) if text is not None and visual_prompt is not None: raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." ) if text is not None: A__ = self.tokenizer(snake_case_ , return_tensors=snake_case_ , **snake_case_ ) if visual_prompt is not None: A__ = self.image_processor(snake_case_ , return_tensors=snake_case_ , **snake_case_ ) if images is not None: A__ = self.image_processor(snake_case_ , return_tensors=snake_case_ , **snake_case_ ) if visual_prompt is not None and images is not None: A__ = { "pixel_values": image_features.pixel_values, "conditional_pixel_values": prompt_features.pixel_values, } return encoding elif text is not None and images is not None: A__ = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: A__ = { "conditional_pixel_values": prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**snake_case_ ) , tensor_type=snake_case_ ) def __magic_name__ ( self : Tuple , *snake_case_ : List[str] , **snake_case_ : Optional[int] ) -> str: '''simple docstring''' return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ ) def __magic_name__ ( self : Optional[Any] , *snake_case_ : Any , **snake_case_ : List[Any] ) -> List[str]: '''simple docstring''' return self.tokenizer.decode(*snake_case_ , **snake_case_ ) @property def __magic_name__ ( self : int ) -> Optional[int]: '''simple docstring''' warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , snake_case_ , ) return self.image_processor_class @property def __magic_name__ ( self : int ) -> int: '''simple docstring''' warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , snake_case_ , ) return self.image_processor
230
0