code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
"""simple docstring""" import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = (DEISMultistepScheduler,) __lowerCamelCase = (("num_inference_steps", 25),) def UpperCAmelCase_ ( self , **snake_case__ ): '''simple docstring''' lowercase__ : Optional[Any]= { "num_train_timesteps": 1000, "beta_start": 0.00_01, "beta_end": 0.02, "beta_schedule": "linear", "solver_order": 2, } config.update(**snake_case__ ) return config def UpperCAmelCase_ ( self , snake_case__=0 , **snake_case__ ): '''simple docstring''' lowercase__ : Tuple= dict(self.forward_default_kwargs ) lowercase__ : str= kwargs.pop("num_inference_steps" , snake_case__ ) lowercase__ : Optional[Any]= self.dummy_sample lowercase__ : Tuple= 0.1 * sample lowercase__ : str= [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: lowercase__ : List[Any]= self.get_scheduler_config(**snake_case__ ) lowercase__ : List[Any]= scheduler_class(**snake_case__ ) scheduler.set_timesteps(snake_case__ ) # copy over dummy past residuals lowercase__ : Any= dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(snake_case__ ) lowercase__ : Union[str, Any]= scheduler_class.from_pretrained(snake_case__ ) new_scheduler.set_timesteps(snake_case__ ) # copy over dummy past residuals lowercase__ : int= dummy_past_residuals[: new_scheduler.config.solver_order] lowercase__, lowercase__ : Optional[Any]= sample, sample for t in range(snake_case__ , time_step + scheduler.config.solver_order + 1 ): lowercase__ : str= scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample lowercase__ : Union[str, Any]= new_scheduler.step(snake_case__ , snake_case__ , 
snake_case__ , **snake_case__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def UpperCAmelCase_ ( self ): '''simple docstring''' pass def UpperCAmelCase_ ( self , snake_case__=0 , **snake_case__ ): '''simple docstring''' lowercase__ : Dict= dict(self.forward_default_kwargs ) lowercase__ : Dict= kwargs.pop("num_inference_steps" , snake_case__ ) lowercase__ : str= self.dummy_sample lowercase__ : Tuple= 0.1 * sample lowercase__ : Optional[Any]= [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: lowercase__ : List[Any]= self.get_scheduler_config() lowercase__ : Dict= scheduler_class(**snake_case__ ) scheduler.set_timesteps(snake_case__ ) # copy over dummy past residuals (must be after setting timesteps) lowercase__ : Optional[Any]= dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(snake_case__ ) lowercase__ : Tuple= scheduler_class.from_pretrained(snake_case__ ) # copy over dummy past residuals new_scheduler.set_timesteps(snake_case__ ) # copy over dummy past residual (must be after setting timesteps) lowercase__ : Dict= dummy_past_residuals[: new_scheduler.config.solver_order] lowercase__ : Union[str, Any]= scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample lowercase__ : Union[str, Any]= new_scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def UpperCAmelCase_ ( self , snake_case__=None , **snake_case__ ): '''simple docstring''' if scheduler is None: lowercase__ : str= self.scheduler_classes[0] lowercase__ : int= self.get_scheduler_config(**snake_case__ ) lowercase__ : Dict= scheduler_class(**snake_case__ ) lowercase__ : int= self.scheduler_classes[0] lowercase__ : List[Any]= 
self.get_scheduler_config(**snake_case__ ) lowercase__ : List[Any]= scheduler_class(**snake_case__ ) lowercase__ : Any= 10 lowercase__ : Any= self.dummy_model() lowercase__ : int= self.dummy_sample_deter scheduler.set_timesteps(snake_case__ ) for i, t in enumerate(scheduler.timesteps ): lowercase__ : List[str]= model(snake_case__ , snake_case__ ) lowercase__ : int= scheduler.step(snake_case__ , snake_case__ , snake_case__ ).prev_sample return sample def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= dict(self.forward_default_kwargs ) lowercase__ : int= kwargs.pop("num_inference_steps" , snake_case__ ) for scheduler_class in self.scheduler_classes: lowercase__ : Dict= self.get_scheduler_config() lowercase__ : Optional[int]= scheduler_class(**snake_case__ ) lowercase__ : Any= self.dummy_sample lowercase__ : List[str]= 0.1 * sample if num_inference_steps is not None and hasattr(snake_case__ , "set_timesteps" ): scheduler.set_timesteps(snake_case__ ) elif num_inference_steps is not None and not hasattr(snake_case__ , "set_timesteps" ): lowercase__ : Any= num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) lowercase__ : Any= [residual + 0.2, residual + 0.15, residual + 0.10] lowercase__ : Union[str, Any]= dummy_past_residuals[: scheduler.config.solver_order] lowercase__ : int= scheduler.timesteps[5] lowercase__ : int= scheduler.timesteps[6] lowercase__ : Union[str, Any]= scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample lowercase__ : Dict= scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def UpperCAmelCase_ ( self ): '''simple docstring''' # make sure that iterating over schedulers with same config names gives same results # for defaults lowercase__ : Union[str, Any]= DEISMultistepScheduler(**self.get_scheduler_config() ) lowercase__ : 
Any= self.full_loop(scheduler=snake_case__ ) lowercase__ : Dict= torch.mean(torch.abs(snake_case__ ) ) assert abs(result_mean.item() - 0.2_39_16 ) < 1e-3 lowercase__ : Tuple= DPMSolverSinglestepScheduler.from_config(scheduler.config ) lowercase__ : List[str]= DPMSolverMultistepScheduler.from_config(scheduler.config ) lowercase__ : Optional[int]= UniPCMultistepScheduler.from_config(scheduler.config ) lowercase__ : Optional[int]= DEISMultistepScheduler.from_config(scheduler.config ) lowercase__ : Dict= self.full_loop(scheduler=snake_case__ ) lowercase__ : List[Any]= torch.mean(torch.abs(snake_case__ ) ) assert abs(result_mean.item() - 0.2_39_16 ) < 1e-3 def UpperCAmelCase_ ( self ): '''simple docstring''' for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' self.check_over_configs(thresholding=snake_case__ ) for order in [1, 2, 3]: for solver_type in ["logrho"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=snake_case__ , prediction_type=snake_case__ , sample_max_value=snake_case__ , algorithm_type="deis" , solver_order=snake_case__ , solver_type=snake_case__ , ) def UpperCAmelCase_ ( self ): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' for algorithm_type in ["deis"]: for solver_type in ["logrho"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=snake_case__ , solver_type=snake_case__ , prediction_type=snake_case__ , algorithm_type=snake_case__ , ) lowercase__ : int= self.full_loop( solver_order=snake_case__ , solver_type=snake_case__ , prediction_type=snake_case__ , algorithm_type=snake_case__ , ) assert not torch.isnan(snake_case__ ).any(), "Samples have nan numbers" def UpperCAmelCase_ ( self ): 
'''simple docstring''' self.check_over_configs(lower_order_final=snake_case__ ) self.check_over_configs(lower_order_final=snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=snake_case__ , time_step=0 ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : str= self.full_loop() lowercase__ : Union[str, Any]= torch.mean(torch.abs(snake_case__ ) ) assert abs(result_mean.item() - 0.2_39_16 ) < 1e-3 def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= self.full_loop(prediction_type="v_prediction" ) lowercase__ : List[Any]= torch.mean(torch.abs(snake_case__ ) ) assert abs(result_mean.item() - 0.0_91 ) < 1e-3 def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : str= self.scheduler_classes[0] lowercase__ : Tuple= self.get_scheduler_config(thresholding=snake_case__ , dynamic_thresholding_ratio=0 ) lowercase__ : Optional[int]= scheduler_class(**snake_case__ ) lowercase__ : Tuple= 10 lowercase__ : Dict= self.dummy_model() lowercase__ : Optional[Any]= self.dummy_sample_deter.half() scheduler.set_timesteps(snake_case__ ) for i, t in enumerate(scheduler.timesteps ): lowercase__ : Union[str, Any]= model(snake_case__ , snake_case__ ) lowercase__ : Tuple= scheduler.step(snake_case__ , snake_case__ , snake_case__ ).prev_sample assert sample.dtype == torch.floataa
85
"""simple docstring""" from __future__ import annotations def lowercase__(A ) ->list[int]: # This function is recursive """simple docstring""" lowercase__ : int= len(A ) # If the array contains only one element, we return it (it's the stop condition of # recursion) if array_length <= 1: return array # Else lowercase__ : str= array[0] lowercase__ : Optional[Any]= False lowercase__ : Any= 1 lowercase__ : list[int]= [] while not is_found and i < array_length: if array[i] < pivot: lowercase__ : Union[str, Any]= True lowercase__ : List[str]= [element for element in array[i:] if element >= array[i]] lowercase__ : Union[str, Any]= longest_subsequence(A ) if len(A ) > len(A ): lowercase__ : List[str]= temp_array else: i += 1 lowercase__ : List[str]= [element for element in array[1:] if element >= pivot] lowercase__ : List[str]= [pivot, *longest_subsequence(A )] if len(A ) > len(A ): return temp_array else: return longest_subseq if __name__ == "__main__": import doctest doctest.testmod()
85
1
"""simple docstring""" from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __UpperCAmelCase: """simple docstring""" def __init__( self , snake_case__ , snake_case__=3 , snake_case__=32 , snake_case__=3 , snake_case__=10 , snake_case__=[10, 20, 30, 40] , snake_case__=[1, 1, 2, 1] , snake_case__=True , snake_case__=True , snake_case__="relu" , snake_case__=3 , snake_case__=None , ): '''simple docstring''' lowercase__ : int= parent lowercase__ : Union[str, Any]= batch_size lowercase__ : int= image_size lowercase__ : Union[str, Any]= num_channels lowercase__ : int= embeddings_size lowercase__ : Optional[Any]= hidden_sizes lowercase__ : List[Any]= depths lowercase__ : Optional[int]= is_training lowercase__ : Tuple= use_labels lowercase__ : str= hidden_act lowercase__ : Tuple= num_labels lowercase__ : Union[str, Any]= scope lowercase__ : Optional[Any]= len(snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : str= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ : Dict= None if self.use_labels: lowercase__ : Tuple= ids_tensor([self.batch_size] , self.num_labels ) lowercase__ : str= self.get_config() return config, pixel_values, labels def UpperCAmelCase_ ( self ): '''simple docstring''' return RegNetConfig( 
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' lowercase__ : List[str]= TFRegNetModel(config=snake_case__ ) lowercase__ : Union[str, Any]= model(snake_case__ , training=snake_case__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' lowercase__ : str= self.num_labels lowercase__ : str= TFRegNetForImageClassification(snake_case__ ) lowercase__ : List[str]= model(snake_case__ , labels=snake_case__ , training=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= self.prepare_config_and_inputs() lowercase__, lowercase__, lowercase__ : List[Any]= config_and_inputs lowercase__ : Union[str, Any]= {"pixel_values": pixel_values} return config, inputs_dict @require_tf class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" __lowerCamelCase = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () __lowerCamelCase = ( {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification} if is_tf_available() else {} ) __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= TFRegNetModelTester(self ) lowercase__ : List[Any]= ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ ) def UpperCAmelCase_ ( self ): 
'''simple docstring''' return @unittest.skip(reason="RegNet does not use inputs_embeds" ) def UpperCAmelCase_ ( self ): '''simple docstring''' pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , ) @slow def UpperCAmelCase_ ( self ): '''simple docstring''' super().test_keras_fit() @unittest.skip(reason="RegNet does not support input and output embeddings" ) def UpperCAmelCase_ ( self ): '''simple docstring''' pass def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__, lowercase__ : List[Any]= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Tuple= model_class(snake_case__ ) lowercase__ : Union[str, Any]= inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : Tuple= [*signature.parameters.keys()] lowercase__ : Tuple= ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ): lowercase__ : Optional[Any]= model_class(snake_case__ ) lowercase__ : str= model(**self._prepare_for_class(snake_case__ , snake_case__ ) , training=snake_case__ ) lowercase__ : Optional[int]= outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase__ : Any= self.model_tester.num_stages self.assertEqual(len(snake_case__ ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) lowercase__, 
lowercase__ : Any= self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : int= ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: lowercase__ : int= layer_type lowercase__ : List[Any]= True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : int= True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__, lowercase__ : List[Any]= self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(snake_case__ , snake_case__ , snake_case__ , snake_case__={} ): lowercase__ : int= model(snake_case__ , return_dict=snake_case__ , **snake_case__ ) lowercase__ : List[str]= model(snake_case__ , return_dict=snake_case__ , **snake_case__ ).to_tuple() def recursive_check(snake_case__ , snake_case__ ): if isinstance(snake_case__ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(snake_case__ , snake_case__ ): recursive_check(snake_case__ , snake_case__ ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(snake_case__ , snake_case__ ) ) , msg=( "Tuple and dict output are not equal. 
Difference:" F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}''' ) , ) recursive_check(snake_case__ , snake_case__ ) for model_class in self.all_model_classes: lowercase__ : int= model_class(snake_case__ ) lowercase__ : str= self._prepare_for_class(snake_case__ , snake_case__ ) lowercase__ : List[Any]= self._prepare_for_class(snake_case__ , snake_case__ ) check_equivalence(snake_case__ , snake_case__ , snake_case__ ) lowercase__ : Optional[Any]= self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) lowercase__ : Any= self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) check_equivalence(snake_case__ , snake_case__ , snake_case__ ) lowercase__ : Union[str, Any]= self._prepare_for_class(snake_case__ , snake_case__ ) lowercase__ : List[str]= self._prepare_for_class(snake_case__ , snake_case__ ) check_equivalence(snake_case__ , snake_case__ , snake_case__ , {"output_hidden_states": True} ) lowercase__ : int= self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) lowercase__ : Dict= self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) check_equivalence(snake_case__ , snake_case__ , snake_case__ , {"output_hidden_states": True} ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) @slow def UpperCAmelCase_ ( self ): '''simple docstring''' for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Dict= TFRegNetModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def lowercase__() ->Union[str, Any]: """simple docstring""" lowercase__ : Union[str, Any]= Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" @cached_property def 
UpperCAmelCase_ ( self ): '''simple docstring''' return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[Any]= TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) lowercase__ : List[str]= self.default_image_processor lowercase__ : Any= prepare_img() lowercase__ : List[str]= image_processor(images=snake_case__ , return_tensors="tf" ) # forward pass lowercase__ : Dict= model(**snake_case__ , training=snake_case__ ) # verify the logits lowercase__ : Tuple= tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , snake_case__ ) lowercase__ : str= tf.constant([-0.41_80, -1.50_51, -3.48_36] ) tf.debugging.assert_near(outputs.logits[0, :3] , snake_case__ , atol=1e-4 )
85
"""simple docstring""" import argparse from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline if __name__ == "__main__": a : int = argparse.ArgumentParser() parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--txt2img_unclip""", default="""kakaobrain/karlo-v1-alpha""", type=str, required=False, help="""The pretrained txt2img unclip.""", ) a : List[str] = parser.parse_args() a : List[str] = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip) a : Optional[Any] = CLIPImageProcessor() a : List[str] = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""") a : Tuple = UnCLIPImageVariationPipeline( decoder=txtaimg.decoder, text_encoder=txtaimg.text_encoder, tokenizer=txtaimg.tokenizer, text_proj=txtaimg.text_proj, feature_extractor=feature_extractor, image_encoder=image_encoder, super_res_first=txtaimg.super_res_first, super_res_last=txtaimg.super_res_last, decoder_scheduler=txtaimg.decoder_scheduler, super_res_scheduler=txtaimg.super_res_scheduler, ) imgaimg.save_pretrained(args.dump_path)
85
1
"""simple docstring""" import math def lowercase__(A , A ) ->float: """simple docstring""" if initial_intensity < 0: raise ValueError("The value of intensity cannot be negative" ) # handling of negative values of initial intensity if angle < 0 or angle > 360: raise ValueError("In Malus Law, the angle is in the range 0-360 degrees" ) # handling of values out of allowed range return initial_intensity * (math.cos(math.radians(A ) ) ** 2) if __name__ == "__main__": import doctest doctest.testmod(name="""malus_law""")
85
"""simple docstring""" import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, 
load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() a : Optional[Any] = { """bart""": ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), """bert""": ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-uncased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-cased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-base-cased-finetuned-mrpc""": ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """dpr""": ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), """gpt2""": ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlnet""": ( XLNetConfig, 
TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm""": ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm-roberta""": ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """transfo-xl""": ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """openai-gpt""": ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """roberta""": ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """layoutlm""": ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), """roberta-large-mnli""": ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """camembert""": ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """flaubert""": ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert""": ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert-base-distilled-squad""": ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert""": ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert-visual-feature-encoder""": ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """ctrl""": ( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """albert""": ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, 
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """t5""": ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """electra""": ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """wav2vec2""": ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def lowercase__(A , A , A , A , A=False , A=True ) ->Union[str, Any]: """simple docstring""" if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase__, lowercase__, lowercase__, lowercase__ : List[Any]= MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models ) lowercase__ : List[Any]= config_class.from_json_file(A ) lowercase__ : Any= True lowercase__ : List[str]= True print(f'''Building TensorFlow model from configuration: {config}''' ) lowercase__ : Optional[int]= model_class(A ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): lowercase__ : List[str]= cached_file( A , A , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: lowercase__ : Union[str, Any]= load_pytorch_checkpoint_in_tfa_model(A , A ) if compare_with_pt_model: lowercase__ : Any= tf_model(tf_model.dummy_inputs , training=A ) # build the network lowercase__ : Optional[Any]= torch.load(A , map_location="cpu" ) lowercase__ : Union[str, Any]= pt_model_class.from_pretrained( pretrained_model_name_or_path=A , config=A , state_dict=A ) with torch.no_grad(): lowercase__ : str= pt_model(**pt_model.dummy_inputs ) lowercase__ : Tuple= pto[0].numpy() lowercase__ : List[Any]= tfo[0].numpy() lowercase__ : Any= np.amax(np.abs(np_pt - np_tf ) ) print(f'''Max absolute difference between models outputs {diff}''' ) assert diff <= 2e-2, f'''Error, model absolute difference is 
>2e-2: {diff}''' # Save pytorch-model print(f'''Save TensorFlow model to {tf_dump_path}''' ) tf_model.save_weights(A , save_format="h5" ) def lowercase__(A , A , A=None , A=None , A=False , A=False , A=False , A=False , ) ->List[Any]: """simple docstring""" if args_model_type is None: lowercase__ : Tuple= list(MODEL_CLASSES.keys() ) else: lowercase__ : Optional[int]= [args_model_type] for j, model_type in enumerate(A , start=1 ): print("=" * 100 ) print(f''' Converting model type {j}/{len(A )}: {model_type}''' ) print("=" * 100 ) if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ : Optional[int]= MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: lowercase__ : int= list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: lowercase__ : Any= model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(A , A ) , start=1 ): print("-" * 100 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' ) continue lowercase__ : Any= model_shortcut_name elif only_convert_finetuned_models: print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''' ) continue print( f''' Converting checkpoint {i}/{len(A )}: {model_shortcut_name} - model_type {model_type}''' ) print("-" * 100 ) if config_shortcut_name in aws_config_map: lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models ) else: lowercase__ : Union[str, Any]= config_shortcut_name if model_shortcut_name in aws_model_maps: lowercase__ : str= cached_file(A , A , force_download=not use_cached_models ) else: lowercase__ : Any= model_shortcut_name if os.path.isfile(A ): lowercase__ : Dict= "converted_model" 
convert_pt_checkpoint_to_tf( model_type=A , pytorch_checkpoint_path=A , config_file=A , tf_dump_path=os.path.join(A , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=A , ) if remove_cached_files: os.remove(A ) os.remove(A ) if __name__ == "__main__": a : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file.""" ) parser.add_argument( """--model_type""", default=None, type=str, help=( F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """ """convert all the models from AWS.""" ), ) parser.add_argument( """--pytorch_checkpoint_path""", default=None, type=str, help=( """Path to the PyTorch checkpoint path or shortcut name to download from AWS. """ """If not given, will download and convert all the checkpoints from AWS.""" ), ) parser.add_argument( """--config_file""", default=None, type=str, help=( """The config json file corresponding to the pre-trained model. \n""" """This specifies the model architecture. 
If not given and """ """--pytorch_checkpoint_path is not given or is a shortcut name """ """use the configuration associated to the shortcut name on the AWS""" ), ) parser.add_argument( """--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions.""" ) parser.add_argument( """--use_cached_models""", action="""store_true""", help="""Use cached models if possible instead of updating to latest checkpoint versions.""", ) parser.add_argument( """--remove_cached_files""", action="""store_true""", help="""Remove pytorch models after conversion (save memory when converting in batches).""", ) parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""") a : List[str] = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
85
1
"""simple docstring""" import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowercase__(A , A ) ->str: """simple docstring""" assert isinstance(A , A ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowercase__(A , A , A ) ->List[Any]: """simple docstring""" lowercase__ : Any= tmp_path / "cache" lowercase__ : Any= {"text": "string"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowercase__ : Union[str, Any]= TextDatasetReader(A , cache_dir=A , keep_in_memory=A ).read() _check_text_dataset(A , A ) @pytest.mark.parametrize( "features" , [ None, {"text": "string"}, {"text": "int32"}, {"text": "float32"}, ] , ) def lowercase__(A , A , A ) ->str: """simple docstring""" lowercase__ : int= tmp_path / "cache" lowercase__ : Optional[Any]= {"text": "string"} lowercase__ : int= features.copy() if features else default_expected_features lowercase__ : List[str]= ( Features({feature: Value(A ) for feature, dtype in features.items()} ) if features is not None else None ) lowercase__ : str= TextDatasetReader(A , features=A , cache_dir=A ).read() _check_text_dataset(A , A ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowercase__(A , A , A ) ->List[str]: """simple docstring""" lowercase__ : Optional[Any]= tmp_path / "cache" lowercase__ : Tuple= {"text": "string"} lowercase__ : List[Any]= TextDatasetReader(A , cache_dir=A , split=A ).read() _check_text_dataset(A , A ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def lowercase__(A , A , 
A ) ->List[Any]: """simple docstring""" if issubclass(A , A ): lowercase__ : Tuple= text_path elif issubclass(A , A ): lowercase__ : Union[str, Any]= [text_path] lowercase__ : List[Any]= tmp_path / "cache" lowercase__ : str= {"text": "string"} lowercase__ : Dict= TextDatasetReader(A , cache_dir=A ).read() _check_text_dataset(A , A ) def lowercase__(A , A , A=("train",) ) ->Optional[Any]: """simple docstring""" assert isinstance(A , A ) for split in splits: lowercase__ : List[Any]= dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowercase__(A , A , A ) ->str: """simple docstring""" lowercase__ : Optional[int]= tmp_path / "cache" lowercase__ : Union[str, Any]= {"text": "string"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowercase__ : Tuple= TextDatasetReader({"train": text_path} , cache_dir=A , keep_in_memory=A ).read() _check_text_datasetdict(A , A ) @pytest.mark.parametrize( "features" , [ None, {"text": "string"}, {"text": "int32"}, {"text": "float32"}, ] , ) def lowercase__(A , A , A ) ->Optional[Any]: """simple docstring""" lowercase__ : Optional[int]= tmp_path / "cache" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" lowercase__ : Optional[int]= {"text": "string"} lowercase__ : List[str]= features.copy() if features else default_expected_features lowercase__ : str= ( Features({feature: Value(A ) for feature, dtype in features.items()} ) if features is not None else None ) lowercase__ : Union[str, Any]= TextDatasetReader({"train": text_path} , features=A , cache_dir=A ).read() _check_text_datasetdict(A , A ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowercase__(A , A , A 
) ->int: """simple docstring""" if split: lowercase__ : int= {split: text_path} else: lowercase__ : str= "train" lowercase__ : int= {"train": text_path, "test": text_path} lowercase__ : Any= tmp_path / "cache" lowercase__ : Optional[Any]= {"text": "string"} lowercase__ : List[Any]= TextDatasetReader(A , cache_dir=A ).read() _check_text_datasetdict(A , A , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
85
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule a : List[str] = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]} if TYPE_CHECKING: from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM else: import sys a : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
85
1
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html a : Dict = """platform""" import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class __UpperCAmelCase: """simple docstring""" __lowerCamelCase = PegasusConfig __lowerCamelCase = {} __lowerCamelCase = "gelu" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=False , snake_case__=99 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=20 , snake_case__=2 , snake_case__=1 , snake_case__=0 , ): '''simple docstring''' lowercase__ : Tuple= parent lowercase__ : int= batch_size lowercase__ : Tuple= seq_length lowercase__ : int= is_training lowercase__ : Optional[int]= use_labels lowercase__ : int= vocab_size lowercase__ : Optional[Any]= hidden_size lowercase__ : int= num_hidden_layers lowercase__ : List[Any]= num_attention_heads lowercase__ : Optional[Any]= intermediate_size lowercase__ : int= hidden_dropout_prob lowercase__ : List[str]= attention_probs_dropout_prob lowercase__ : List[str]= max_position_embeddings lowercase__ : List[Any]= eos_token_id lowercase__ : Tuple= pad_token_id lowercase__ : Any= bos_token_id def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , 
self.vocab_size ) lowercase__ : Dict= np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) lowercase__ : Optional[Any]= np.concatenate([input_ids, eos_tensor] , axis=1 ) lowercase__ : Optional[int]= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : Union[str, Any]= self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) lowercase__ : List[str]= prepare_pegasus_inputs_dict(snake_case__ , snake_case__ , snake_case__ ) return config, inputs_dict def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' lowercase__ : int= 20 lowercase__ : Optional[int]= model_class_name(snake_case__ ) lowercase__ : Optional[Any]= model.encode(inputs_dict["input_ids"] ) lowercase__, lowercase__ : Optional[int]= ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) lowercase__ : List[str]= model.init_cache(decoder_input_ids.shape[0] , snake_case__ , snake_case__ ) lowercase__ : Dict= jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" ) lowercase__ : Any= jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowercase__ : List[str]= model.decode( decoder_input_ids[:, :-1] , snake_case__ , decoder_attention_mask=snake_case__ , past_key_values=snake_case__ , decoder_position_ids=snake_case__ , ) lowercase__ : 
Optional[int]= jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) lowercase__ : Optional[int]= model.decode( decoder_input_ids[:, -1:] , snake_case__ , decoder_attention_mask=snake_case__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=snake_case__ , ) lowercase__ : Dict= model.decode(snake_case__ , snake_case__ ) lowercase__ : Optional[int]= np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''' ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' lowercase__ : List[str]= 20 lowercase__ : List[str]= model_class_name(snake_case__ ) lowercase__ : Union[str, Any]= model.encode(inputs_dict["input_ids"] ) lowercase__, lowercase__ : Tuple= ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) lowercase__ : Optional[Any]= jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) lowercase__ : Any= model.init_cache(decoder_input_ids.shape[0] , snake_case__ , snake_case__ ) lowercase__ : Tuple= jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowercase__ : List[Any]= model.decode( decoder_input_ids[:, :-1] , snake_case__ , decoder_attention_mask=snake_case__ , past_key_values=snake_case__ , decoder_position_ids=snake_case__ , ) lowercase__ : List[Any]= jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) lowercase__ : int= model.decode( decoder_input_ids[:, -1:] , snake_case__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=snake_case__ , decoder_position_ids=snake_case__ , ) lowercase__ : List[str]= model.decode(snake_case__ , snake_case__ , decoder_attention_mask=snake_case__ ) lowercase__ : Any= 
np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''' ) def lowercase__(A , A , A , A=None , A=None , ) ->Union[str, Any]: """simple docstring""" if attention_mask is None: lowercase__ : Optional[int]= np.not_equal(A , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: lowercase__ : List[str]= np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" __lowerCamelCase = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) __lowerCamelCase = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () __lowerCamelCase = True __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= FlaxPegasusModelTester(self ) lowercase__ : Tuple= ConfigTester(self , config_class=snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__, lowercase__ : Any= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(snake_case__ , snake_case__ , snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__, lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(snake_case__ , snake_case__ , snake_case__ ) def UpperCAmelCase_ ( self ): 
'''simple docstring''' lowercase__, lowercase__ : Dict= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowercase__ : List[Any]= self._prepare_for_class(snake_case__ , snake_case__ ) lowercase__ : Optional[int]= model_class(snake_case__ ) @jax.jit def encode_jitted(snake_case__ , snake_case__=None , **snake_case__ ): return model.encode(input_ids=snake_case__ , attention_mask=snake_case__ ) with self.subTest("JIT Enabled" ): lowercase__ : Any= encode_jitted(**snake_case__ ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): lowercase__ : Dict= encode_jitted(**snake_case__ ).to_tuple() self.assertEqual(len(snake_case__ ) , len(snake_case__ ) ) for jitted_output, output in zip(snake_case__ , snake_case__ ): self.assertEqual(jitted_output.shape , output.shape ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__, lowercase__ : Optional[int]= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowercase__ : Optional[int]= model_class(snake_case__ ) lowercase__ : Optional[Any]= model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] ) lowercase__ : Tuple= { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(snake_case__ , snake_case__ , snake_case__ ): return model.decode( decoder_input_ids=snake_case__ , decoder_attention_mask=snake_case__ , encoder_outputs=snake_case__ , ) with self.subTest("JIT Enabled" ): lowercase__ : Any= decode_jitted(**snake_case__ ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): lowercase__ : Optional[int]= decode_jitted(**snake_case__ ).to_tuple() self.assertEqual(len(snake_case__ ) , len(snake_case__ ) ) for jitted_output, output in zip(snake_case__ , 
snake_case__ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def UpperCAmelCase_ ( self ): '''simple docstring''' for model_class_name in self.all_model_classes: lowercase__ : Optional[Any]= model_class_name.from_pretrained("google/pegasus-large" , from_pt=snake_case__ ) lowercase__ : List[str]= np.ones((1, 1) ) lowercase__ : List[Any]= model(snake_case__ ) self.assertIsNotNone(snake_case__ ) @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" ) lowercase__ : Any= PegasusTokenizer.from_pretrained("google/pegasus-xsum" ) lowercase__ : Any= [ " PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.", " The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. 
And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ", ] lowercase__ : Dict= [ "California's largest electricity provider has turned off power to hundreds of thousands of customers.", "Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.", ] lowercase__ : int= tokenizer(snake_case__ , return_tensors="np" , truncation=snake_case__ , max_length=512 , padding=snake_case__ ) lowercase__ : List[str]= model.generate(**snake_case__ , num_beams=2 ).sequences lowercase__ : Union[str, Any]= tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ ) assert tgt_text == decoded
85
"""simple docstring""" def lowercase__(A ) ->list: """simple docstring""" if n_term == "": return [] lowercase__ : list= [] for temp in range(int(A ) ): series.append(f'''1/{temp + 1}''' if series else "1" ) return series if __name__ == "__main__": a : Dict = input("""Enter the last number (nth term) of the Harmonic Series""") print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""") print(harmonic_series(nth_term))
85
1
"""simple docstring""" from __future__ import annotations from itertools import permutations from random import randint from timeit import repeat def lowercase__() ->tuple[list[int], int]: """simple docstring""" lowercase__ : Optional[int]= [randint(-1_000 , 1_000 ) for i in range(10 )] lowercase__ : str= randint(-5_000 , 5_000 ) return (arr, r) a : Optional[int] = make_dataset() def lowercase__(A , A ) ->tuple[int, ...]: """simple docstring""" for triplet in permutations(A , 3 ): if sum(A ) == target: return tuple(sorted(A ) ) return (0, 0, 0) def lowercase__(A , A ) ->tuple[int, int, int]: """simple docstring""" arr.sort() lowercase__ : Optional[int]= len(A ) for i in range(n - 1 ): lowercase__, lowercase__ : int= i + 1, n - 1 while left < right: if arr[i] + arr[left] + arr[right] == target: return (arr[i], arr[left], arr[right]) elif arr[i] + arr[left] + arr[right] < target: left += 1 elif arr[i] + arr[left] + arr[right] > target: right -= 1 return (0, 0, 0) def lowercase__() ->tuple[float, float]: """simple docstring""" lowercase__ : List[str]= "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n" lowercase__ : Union[str, Any]= "\ntriplet_sum1(*dataset)\n" lowercase__ : Union[str, Any]= "\ntriplet_sum2(*dataset)\n" lowercase__ : List[str]= repeat(setup=A , stmt=A , repeat=5 , number=10_000 ) lowercase__ : Tuple= repeat(setup=A , stmt=A , repeat=5 , number=10_000 ) return (min(A ), min(A )) if __name__ == "__main__": from doctest import testmod testmod() a : Any = solution_times() print(F"""The time for naive implementation is {times[0]}.""") print(F"""The time for optimized implementation is {times[1]}.""")
85
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : int = logging.get_logger(__name__) a : str = { """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""", """google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""", """google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""", # See all BigBird models at https://huggingface.co/models?filter=big_bird } class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = "big_bird" def __init__( self , snake_case__=50358 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu_new" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=4096 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=True , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=66 , snake_case__="block_sparse" , snake_case__=True , snake_case__=False , snake_case__=64 , snake_case__=3 , snake_case__=None , **snake_case__ , ): '''simple docstring''' super().__init__( pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , sep_token_id=snake_case__ , **snake_case__ , ) lowercase__ : Dict= vocab_size lowercase__ : Optional[int]= max_position_embeddings lowercase__ : List[Any]= hidden_size lowercase__ : List[str]= num_hidden_layers lowercase__ : List[str]= num_attention_heads lowercase__ : Optional[int]= intermediate_size lowercase__ : Optional[int]= hidden_act lowercase__ : Tuple= hidden_dropout_prob lowercase__ : int= attention_probs_dropout_prob lowercase__ : int= initializer_range lowercase__ : List[Any]= type_vocab_size lowercase__ : Union[str, Any]= layer_norm_eps lowercase__ : Optional[Any]= 
use_cache lowercase__ : Union[str, Any]= rescale_embeddings lowercase__ : Union[str, Any]= attention_type lowercase__ : Any= use_bias lowercase__ : List[Any]= block_size lowercase__ : Optional[Any]= num_random_blocks lowercase__ : Optional[int]= classifier_dropout class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" @property def UpperCAmelCase_ ( self ): '''simple docstring''' if self.task == "multiple-choice": lowercase__ : List[Any]= {0: "batch", 1: "choice", 2: "sequence"} else: lowercase__ : Tuple= {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
85
1
"""simple docstring""" import argparse import torch from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def lowercase__(A , A , A ) ->str: """simple docstring""" if openai_config_file == "": lowercase__ : Any= OpenAIGPTConfig() else: lowercase__ : List[Any]= OpenAIGPTConfig.from_json_file(A ) lowercase__ : Optional[int]= OpenAIGPTModel(A ) # Load weights from numpy load_tf_weights_in_openai_gpt(A , A , A ) # Save pytorch-model lowercase__ : Union[str, Any]= pytorch_dump_folder_path + "/" + WEIGHTS_NAME lowercase__ : Any= pytorch_dump_folder_path + "/" + CONFIG_NAME print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' ) torch.save(model.state_dict() , A ) print(f'''Save configuration file to {pytorch_config_dump_path}''' ) with open(A , "w" , encoding="utf-8" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": a : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--openai_checkpoint_folder_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--openai_config_file""", default="""""", type=str, help=( """An optional config json file corresponding to the pre-trained OpenAI model. \n""" """This specifies the model architecture.""" ), ) a : Union[str, Any] = parser.parse_args() convert_openai_checkpoint_to_pytorch( args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path )
85
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
85
1
"""simple docstring""" def lowercase__(A ) ->list: """simple docstring""" lowercase__ : List[Any]= [0] * len(A ) for i in range(1 , len(A ) ): # use last results for better performance - dynamic programming lowercase__ : List[Any]= prefix_result[i - 1] while j > 0 and input_string[i] != input_string[j]: lowercase__ : List[Any]= prefix_result[j - 1] if input_string[i] == input_string[j]: j += 1 lowercase__ : Any= j return prefix_result def lowercase__(A ) ->int: """simple docstring""" return max(prefix_function(A ) ) if __name__ == "__main__": import doctest doctest.testmod()
85
"""simple docstring""" from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def lowercase__(A , A ) ->List[Any]: """simple docstring""" lowercase__ : str= [] for part_id in partition_order: lowercase__ : int= df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect() for row_idx, row in enumerate(A ): expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->str: """simple docstring""" lowercase__ : Optional[Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Tuple= spark.range(100 ).repartition(1 ) lowercase__ : Dict= Spark(A ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=16 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 50 @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->Tuple: """simple docstring""" lowercase__ : Union[str, Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Dict= spark.range(10 ).repartition(2 ) lowercase__ : Optional[Any]= [1, 0] lowercase__ : List[str]= _generate_iterable_examples(A , A ) # Reverse the partitions. 
lowercase__ : int= _get_expected_row_ids_and_row_dicts_for_partition_order(A , A ) for i, (row_id, row_dict) in enumerate(generate_fn() ): lowercase__, lowercase__ : Any= expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->int: """simple docstring""" lowercase__ : int= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Dict= spark.range(10 ).repartition(1 ) lowercase__ : str= SparkExamplesIterable(A ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(A ): assert row_id == f'''0_{i}''' assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->str: """simple docstring""" lowercase__ : List[str]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : int= spark.range(30 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. 
with patch("numpy.random.Generator" ) as generator_mock: lowercase__ : Optional[Any]= lambda A : x.reverse() lowercase__ : Tuple= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [2, 1, 0] ) lowercase__ : List[str]= SparkExamplesIterable(A ).shuffle_data_sources(A ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(A ): lowercase__, lowercase__ : str= expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->Any: """simple docstring""" lowercase__ : Dict= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Union[str, Any]= spark.range(20 ).repartition(4 ) # Partitions 0 and 2 lowercase__ : Optional[int]= SparkExamplesIterable(A ).shard_data_sources(worker_id=0 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowercase__ : Union[str, Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [0, 2] ) for i, (row_id, row_dict) in enumerate(A ): lowercase__, lowercase__ : Tuple= expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 lowercase__ : Tuple= SparkExamplesIterable(A ).shard_data_sources(worker_id=1 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowercase__ : List[Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [1, 3] ) for i, (row_id, row_dict) in enumerate(A ): lowercase__, lowercase__ : Dict= expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->Tuple: """simple docstring""" lowercase__ : Any= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Tuple= spark.range(100 ).repartition(1 ) lowercase__ : Optional[int]= Spark(A ) # Choose a small max_shard_size for maximum partitioning. 
spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 100
85
1
"""Project Euler 37: sum of the only eleven primes that are both left- and
right-truncatable (removing digits from either end always leaves a prime).
"""
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Return True if `number` is prime, using 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # Every prime > 3 is of the form 6k +/- 1, so only test those candidates.
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Return `n` plus every number obtained by truncating digits of `n`
    from the left and from the right.

    >>> list_truncated_nums(997)
    [997, 97, 99, 7, 9]
    """
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))   # drop i leading digits
        list_nums.append(int(str_num[:-i]))  # drop i trailing digits
    return list_nums


def validate(n: int) -> bool:
    """Cheap pre-filter: for numbers with more than 3 digits, the leading
    and trailing 3-digit slices must themselves be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Return the first `count` two-sided truncatable primes.

    Starts at 13 (single-digit primes are excluded by definition) and only
    tests odd numbers, since even numbers > 2 cannot be prime.

    >>> compute_truncated_primes(1)
    [23]
    """
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            # FIX: the machine-renamed original tested `is_prime(A)` — the
            # `count` argument — instead of each truncation `i`.
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Return the sum of the only eleven truncatable primes (Euler 37)."""
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"""{sum(compute_truncated_primes(11)) = }""")
85
"""simple docstring""" import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=2 , snake_case__=99 , snake_case__=0 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=12 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__="last" , snake_case__=None , snake_case__=None , ): '''simple docstring''' lowercase__ : Optional[int]= parent lowercase__ : Tuple= batch_size lowercase__ : Tuple= seq_length lowercase__ : str= is_training lowercase__ : str= use_input_lengths lowercase__ : Any= use_token_type_ids lowercase__ : List[Any]= use_labels lowercase__ : Optional[int]= gelu_activation lowercase__ : str= sinusoidal_embeddings lowercase__ : List[str]= causal lowercase__ : Any= asm lowercase__ : Optional[int]= n_langs lowercase__ : Union[str, Any]= vocab_size lowercase__ : int= n_special lowercase__ : Any= hidden_size lowercase__ : int= num_hidden_layers lowercase__ : 
List[str]= num_attention_heads lowercase__ : List[str]= hidden_dropout_prob lowercase__ : str= attention_probs_dropout_prob lowercase__ : Any= max_position_embeddings lowercase__ : List[Any]= type_vocab_size lowercase__ : int= type_sequence_label_size lowercase__ : Any= initializer_range lowercase__ : Optional[int]= num_labels lowercase__ : Union[str, Any]= num_choices lowercase__ : List[Any]= summary_type lowercase__ : Optional[int]= use_proj lowercase__ : int= scope def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : Dict= random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : Tuple= None if self.use_input_lengths: lowercase__ : List[Any]= ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowercase__ : Tuple= None if self.use_token_type_ids: lowercase__ : Any= ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowercase__ : str= None lowercase__ : Tuple= None lowercase__ : Dict= None if self.use_labels: lowercase__ : Optional[Any]= ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : Optional[Any]= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase__ : Tuple= ids_tensor([self.batch_size] , 2 ).float() lowercase__ : Tuple= ids_tensor([self.batch_size] , self.num_choices ) lowercase__ : List[Any]= self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def UpperCAmelCase_ ( self ): '''simple docstring''' return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , 
sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : Any= FlaubertModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : str= model(snake_case__ , lengths=snake_case__ , langs=snake_case__ ) lowercase__ : str= model(snake_case__ , langs=snake_case__ ) lowercase__ : Any= model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : str= FlaubertWithLMHeadModel(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Optional[Any]= model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : str= FlaubertForQuestionAnsweringSimple(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : List[str]= model(snake_case__ ) lowercase__ : Dict= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) 
) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : List[Any]= FlaubertForQuestionAnswering(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Dict= model(snake_case__ ) lowercase__ : Any= model( snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , p_mask=snake_case__ , ) lowercase__ : List[str]= model( snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , ) ((lowercase__), ) : Optional[Any]= result_with_labels.to_tuple() lowercase__ : Union[str, Any]= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ ) ((lowercase__), ) : List[Any]= result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : List[str]= FlaubertForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Optional[Any]= model(snake_case__ ) lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.loss.shape , () ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : List[Any]= self.num_labels lowercase__ : Union[str, Any]= FlaubertForTokenClassification(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : int= model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : int= self.num_choices lowercase__ : str= FlaubertForMultipleChoice(config=snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Dict= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : int= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : str= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : Any= model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.prepare_config_and_inputs() ( ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ) : Any= config_and_inputs lowercase__ : Tuple= { "input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths, "attention_mask": input_mask, } return config, inputs_dict @require_torch class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" __lowerCamelCase = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) __lowerCamelCase = ( { "feature-extraction": FlaubertModel, "fill-mask": FlaubertWithLMHeadModel, "question-answering": FlaubertForQuestionAnsweringSimple, "text-classification": FlaubertForSequenceClassification, "token-classification": FlaubertForTokenClassification, "zero-shot": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' lowercase__ : Tuple= super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": lowercase__ : List[Any]= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) lowercase__ : List[str]= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) return inputs_dict def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= FlaubertModelTester(self ) lowercase__ : List[str]= ConfigTester(self , config_class=snake_case__ , emb_dim=37 ) def UpperCAmelCase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ ) def UpperCAmelCase_ ( self ): 
'''simple docstring''' lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*snake_case__ ) @slow def UpperCAmelCase_ ( self ): '''simple docstring''' for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : List[str]= FlaubertModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) @slow @require_torch_gpu def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__, lowercase__ : Optional[Any]= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. if model_class == FlaubertForMultipleChoice: return lowercase__ : int= True lowercase__ : List[Any]= model_class(config=snake_case__ ) lowercase__ : str= self._prepare_for_class(snake_case__ , snake_case__ ) lowercase__ : Dict= torch.jit.trace( snake_case__ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(snake_case__ , os.path.join(snake_case__ , "traced_model.pt" ) ) lowercase__ : str= torch.jit.load(os.path.join(snake_case__ , "traced_model.pt" ) , map_location=snake_case__ ) loaded(inputs_dict["input_ids"].to(snake_case__ ) , inputs_dict["attention_mask"].to(snake_case__ ) ) @require_torch class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" ) lowercase__ : Tuple= torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) with torch.no_grad(): lowercase__ : Optional[int]= model(snake_case__ )[0] lowercase__ : Optional[int]= torch.Size((1, 11, 768) 
) self.assertEqual(output.shape , snake_case__ ) lowercase__ : Dict= torch.tensor( [[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
85
1
"""simple docstring""" from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS a : List[str] = logging.get_logger(__name__) a : Optional[Any] = { """linear""": get_linear_schedule_with_warmup, """cosine""": get_cosine_schedule_with_warmup, """cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup, """polynomial""": get_polynomial_decay_schedule_with_warmup, """constant""": get_constant_schedule, """constant_w_warmup""": get_constant_schedule_with_warmup, } class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__( self , snake_case__=None , snake_case__=None , *snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(*snake_case__ , **snake_case__ ) if config is None: assert isinstance(self.model , snake_case__ ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" F''' {self.model.__class__}''' ) lowercase__ : Optional[int]= self.model.config else: lowercase__ : int= config lowercase__ : Any= data_args lowercase__ : Union[str, Any]= self.config.tgt_vocab_size if isinstance(self.config , snake_case__ ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is 
not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( F'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for''' " padding.." ) if self.args.label_smoothing == 0: lowercase__ : List[str]= torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss lowercase__ : Any= label_smoothed_nll_loss def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' if self.optimizer is None: lowercase__ : Dict= ["bias", "LayerNorm.weight"] lowercase__ : Dict= [ { "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], "weight_decay": self.args.weight_decay, }, { "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], "weight_decay": 0.0, }, ] lowercase__ : List[str]= Adafactor if self.args.adafactor else AdamW if self.args.adafactor: lowercase__ : str= Adafactor lowercase__ : int= {"scale_parameter": False, "relative_step": False} else: lowercase__ : Union[str, Any]= AdamW lowercase__ : Optional[int]= { "betas": (self.args.adam_betaa, self.args.adam_betaa), "eps": self.args.adam_epsilon, } lowercase__ : Optional[Any]= self.args.learning_rate if self.sharded_ddp: lowercase__ : List[str]= OSS( params=snake_case__ , optim=snake_case__ , **snake_case__ , ) else: lowercase__ : Tuple= optimizer_cls(snake_case__ , **snake_case__ ) if self.lr_scheduler is None: lowercase__ : List[str]= self._get_lr_scheduler(snake_case__ ) else: # ignoring --lr_scheduler logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." 
) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__ : Dict= arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": lowercase__ : str= schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": lowercase__ : int= schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: lowercase__ : Optional[int]= schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=snake_case__ ) return scheduler def UpperCAmelCase_ ( self ): '''simple docstring''' if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token lowercase__ : List[Any]= model(**snake_case__ , use_cache=snake_case__ )[0] lowercase__ : Dict= self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models lowercase__, lowercase__ : List[Any]= model(**snake_case__ , labels=snake_case__ , use_cache=snake_case__ )[:2] else: # compute label smoothed loss lowercase__ : Dict= model(**snake_case__ , use_cache=snake_case__ )[0] lowercase__ : Optional[int]= torch.nn.functional.log_softmax(snake_case__ , dim=-1 ) lowercase__, lowercase__ : Union[str, Any]= self.loss_fn(snake_case__ , snake_case__ , self.args.label_smoothing , ignore_index=self.config.pad_token_id ) return loss, logits def UpperCAmelCase_ ( 
self , snake_case__ , snake_case__ ): '''simple docstring''' lowercase__ : Union[str, Any]= inputs.pop("labels" ) lowercase__, lowercase__ : List[str]= self._compute_loss(snake_case__ , snake_case__ , snake_case__ ) return loss def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , ): '''simple docstring''' lowercase__ : Union[str, Any]= self._prepare_inputs(snake_case__ ) lowercase__ : List[Any]= { "max_length": self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: lowercase__ : List[Any]= self.model.generate( inputs["input_ids"] , attention_mask=inputs["attention_mask"] , **snake_case__ , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: lowercase__ : Tuple= self._pad_tensors_to_max_len(snake_case__ , gen_kwargs["max_length"] ) lowercase__ : int= inputs.pop("labels" ) with torch.no_grad(): # compute loss on predict data lowercase__, lowercase__ : Any= self._compute_loss(snake_case__ , snake_case__ , snake_case__ ) lowercase__ : Any= loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) lowercase__ : Union[str, Any]= generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: lowercase__ : Optional[Any]= self._pad_tensors_to_max_len(snake_case__ , gen_kwargs["max_length"] ) return (loss, logits, labels) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ): '''simple docstring''' # If PAD token is not defined at least EOS token has to be defined lowercase__ : Any= self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( "Make sure that either `config.pad_token_id` 
or `config.eos_token_id` is defined if tensor has to be" F''' padded to `max_length`={max_length}''' ) lowercase__ : Any= pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) lowercase__ : List[Any]= tensor return padded_tensor
85
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


# NOTE(review): this module was machine-renamed. Both classes below carry the
# same placeholder name `__UpperCAmelCase`, the base classes are the
# placeholder `SCREAMING_SNAKE_CASE__`, and several method bodies read names
# (e.g. `sigma_max`, `sample`, `num_inference_steps`, `sigma_hat`) that the
# renamed `snake_case__` parameters no longer bind — as written these methods
# would raise NameError. The structure looks like a Karras et al. (2022)
# stochastic sampler — verify against diffusers' `scheduling_karras_ve.py`.
@dataclass
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
    """Scheduler step output: the previous (less noisy) sample, the
    derivative used to reach it, and optionally the predicted clean sample."""

    # The `42` literals are placeholders where type annotations once stood;
    # the third field defaults to None (the optional prediction).
    __lowerCamelCase = 42
    __lowerCamelCase = 42
    __lowerCamelCase = None


class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    """Stochastic variance-expanding sampler.

    The six config defaults below (0.02, 100, 1.007, 80, 0.05, 50) match the
    (sigma_min, num_train_timesteps?, s_churn?, sigma_max?, s_noise?, ...)
    family of a Karras-style scheduler — the renaming erased the parameter
    names, so the exact mapping is unverified; TODO confirm upstream.
    """

    # Solver order placeholder (original attribute name was lost).
    __lowerCamelCase = 2

    @register_to_config
    def __init__( self , snake_case__ = 0.02 , snake_case__ = 100 , snake_case__ = 1.0_07 , snake_case__ = 80 , snake_case__ = 0.05 , snake_case__ = 50 , ):
        '''Record the initial noise level and reset the setable state.'''
        # standard deviation of the initial noise distribution
        # (reads `sigma_max`, which the renamed signature no longer defines)
        lowercase__ : int= sigma_max

        # setable values — filled in by the set_timesteps-style method below
        lowercase__ : int= None
        lowercase__ : np.IntTensor= None
        lowercase__ : torch.FloatTensor= None  # sigma(t_i)

    def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
        '''Identity scaling hook: returns the sample unchanged (reads the
        free name `sample`, not the renamed parameter).'''
        return sample

    def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
        '''Build the reversed timestep array and the per-step sigma schedule
        (geometric interpolation from sigma_max down to sigma_min).'''
        lowercase__ : List[Any]= num_inference_steps
        # Descending timesteps: [N-1, N-2, ..., 0]
        lowercase__ : Any= np.arange(0 , self.num_inference_steps )[::-1].copy()
        lowercase__ : Tuple= torch.from_numpy(snake_case__ ).to(snake_case__ )
        # sigma(i) = sigma_max * (sigma_min / sigma_max) ** (i / (N - 1))
        lowercase__ : Union[str, Any]= [
            (
                self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        # `torch.floataa` is a mangled dtype name (presumably torch.float32).
        lowercase__ : int= torch.tensor(snake_case__ , dtype=torch.floataa , device=snake_case__ )

    def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = None ):
        '''Explode the sample to a higher noise level sigma_hat by adding
        gamma-scaled Gaussian noise (the stochastic "churn" step).'''
        # gamma > 0 only while sigma lies inside the configured churn window.
        if self.config.s_min <= sigma <= self.config.s_max:
            lowercase__ : Optional[Any]= min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
        else:
            lowercase__ : str= 0

        # sample eps ~ N(0, S_noise^2 * I)
        lowercase__ : List[Any]= self.config.s_noise * randn_tensor(sample.shape , generator=snake_case__ ).to(sample.device )
        lowercase__ : str= sigma + gamma * sigma
        lowercase__ : Any= sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ):
        '''First-order (Euler) step from sigma_hat down to sigma_prev.'''
        # Predicted clean sample, then the ODE derivative d(sample)/d(sigma).
        lowercase__ : Union[str, Any]= sample_hat + sigma_hat * model_output
        lowercase__ : Optional[int]= (sample_hat - pred_original_sample) / sigma_hat
        lowercase__ : Optional[Any]= sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ )

    def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ):
        '''Second-order (Heun) correction: average the derivative at the two
        endpoints before retaking the step.'''
        lowercase__ : int= sample_prev + sigma_prev * model_output
        lowercase__ : Optional[int]= (sample_prev - pred_original_sample) / sigma_prev
        lowercase__ : Optional[Any]= sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ )

    def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ):
        '''Training-time noise injection is deliberately unsupported for
        this sampler.'''
        raise NotImplementedError()
85
1
"""Find a longest non-decreasing subsequence of a list of integers.

Simple exponential-time recursive algorithm, kept for clarity rather than
speed.
"""
from __future__ import annotations


def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return a longest non-decreasing subsequence of `array`.

    FIX: the machine-renamed original assigned every local to `lowercase__`
    while the body still read `array_length`, `pivot`, `is_found`, etc., so
    any call raised NameError. Names restored from those reads (the recursive
    call already used the name `longest_subsequence`).

    >>> longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
    [10, 22, 33, 41, 60, 80]
    >>> longest_subsequence([])
    []
    """
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop
    # condition of recursion)
    if array_length <= 1:
        return array

    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    # Candidate 1: drop the pivot and root the subsequence at the FIRST
    # element smaller than it (only that one branch is explored).
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    # Candidate 2: keep the pivot and recurse on the elements allowed to
    # follow it; return whichever candidate is longer.
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
85
"""simple docstring"""
from ....utils import logging


# Module-level logger keyed by this module's import path.
# (The original `a : List[str] = ...` annotation referenced the unimported
# name `List`; module-level annotations ARE evaluated, so it is dropped.)
a = logging.get_logger(__name__)


class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
    """Configuration wrapper that adopts every attribute of an existing
    `config` and adds multimodal settings (MMBT-style).

    Args:
        config: an existing model config whose attribute dict is copied.
        num_labels: optional classifier label count; only stored when truthy
            (so 0/None leave the copied config's value untouched).
        modal_hidden_size: hidden size of the non-text modality encoder
            (default 2048).
    """

    def __init__( self , config , num_labels=None , modal_hidden_size=2048 ):
        '''Adopt `config`'s attributes, then record the multimodal fields.'''
        # FIX: the machine-renamed original declared all three parameters as
        # `snake_case__` — duplicate argument names are a SyntaxError — while
        # the body read `config`, `modal_hidden_size` and `num_labels`, and
        # assigned only dead locals. Names and `self.` targets restored from
        # those reads (matches the MMBTConfig pattern in transformers).
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
85
1
"""simple docstring""" from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class __UpperCAmelCase: """simple docstring""" __lowerCamelCase = BlenderbotSmallConfig __lowerCamelCase = {} __lowerCamelCase = "gelu" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=False , snake_case__=99 , snake_case__=32 , snake_case__=2 , snake_case__=4 , snake_case__=37 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=20 , snake_case__=2 , snake_case__=1 , snake_case__=0 , ): '''simple docstring''' lowercase__ : Union[str, Any]= parent lowercase__ : List[Any]= batch_size lowercase__ : Optional[int]= seq_length lowercase__ : Optional[int]= is_training lowercase__ : Optional[int]= use_labels lowercase__ : Optional[int]= vocab_size lowercase__ : Optional[int]= hidden_size lowercase__ : List[str]= num_hidden_layers lowercase__ : Optional[Any]= num_attention_heads lowercase__ : Dict= intermediate_size lowercase__ : Optional[int]= hidden_dropout_prob lowercase__ : str= attention_probs_dropout_prob lowercase__ : List[str]= max_position_embeddings lowercase__ : List[str]= eos_token_id lowercase__ : Any= pad_token_id lowercase__ : Dict= bos_token_id def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) lowercase__ : Union[str, Any]= tf.expand_dims(tf.constant([self.eos_token_id] * 
self.batch_size ) , 1 ) lowercase__ : Dict= tf.concat([input_ids, eos_tensor] , axis=1 ) lowercase__ : Union[str, Any]= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : Union[str, Any]= self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) lowercase__ : Union[str, Any]= prepare_blenderbot_small_inputs_dict(snake_case__ , snake_case__ , snake_case__ ) return config, inputs_dict def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ): '''simple docstring''' lowercase__ : Optional[Any]= TFBlenderbotSmallModel(config=snake_case__ ).get_decoder() lowercase__ : List[str]= inputs_dict["input_ids"] lowercase__ : Optional[int]= input_ids[:1, :] lowercase__ : Tuple= inputs_dict["attention_mask"][:1, :] lowercase__ : Optional[int]= inputs_dict["head_mask"] lowercase__ : int= 1 # first forward pass lowercase__ : Union[str, Any]= model(snake_case__ , attention_mask=snake_case__ , head_mask=snake_case__ , use_cache=snake_case__ ) lowercase__, lowercase__ : Union[str, Any]= outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowercase__ : Optional[int]= ids_tensor((self.batch_size, 3) , config.vocab_size ) lowercase__ : Union[str, Any]= tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and lowercase__ : Tuple= tf.concat([input_ids, next_tokens] , axis=-1 ) lowercase__ : List[Any]= tf.concat([attention_mask, 
next_attn_mask] , axis=-1 ) lowercase__ : Optional[int]= model(snake_case__ , attention_mask=snake_case__ )[0] lowercase__ : Optional[Any]= model(snake_case__ , attention_mask=snake_case__ , past_key_values=snake_case__ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice lowercase__ : Optional[int]= int(ids_tensor((1,) , output_from_past.shape[-1] ) ) lowercase__ : List[str]= output_from_no_past[:, -3:, random_slice_idx] lowercase__ : Union[str, Any]= output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(snake_case__ , snake_case__ , rtol=1e-3 ) def lowercase__(A , A , A , A=None , A=None , A=None , A=None , A=None , ) ->Union[str, Any]: """simple docstring""" if attention_mask is None: lowercase__ : Optional[int]= tf.cast(tf.math.not_equal(A , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: lowercase__ : List[str]= tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: lowercase__ : Tuple= tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowercase__ : List[str]= tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowercase__ : List[Any]= tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" __lowerCamelCase = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) 
__lowerCamelCase = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () __lowerCamelCase = ( { "conversational": TFBlenderbotSmallForConditionalGeneration, "feature-extraction": TFBlenderbotSmallModel, "summarization": TFBlenderbotSmallForConditionalGeneration, "text2text-generation": TFBlenderbotSmallForConditionalGeneration, "translation": TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) __lowerCamelCase = True __lowerCamelCase = False __lowerCamelCase = False def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= TFBlenderbotSmallModelTester(self ) lowercase__ : Optional[Any]= ConfigTester(self , config_class=snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*snake_case__ ) @require_tokenizers @require_tf class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" __lowerCamelCase = [ "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like " " i'm going to throw up.\nand why is that?" 
] __lowerCamelCase = "facebook/blenderbot_small-90M" @cached_property def UpperCAmelCase_ ( self ): '''simple docstring''' # use "old" tokenizer here because of bug when downloading new tokenizer return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" ) @cached_property def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= self.tokenizer(self.src_text , return_tensors="tf" ) lowercase__ : Dict= self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=snake_case__ , ) lowercase__ : Optional[Any]= self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=snake_case__ )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
85
"""simple docstring""" import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def lowercase__(A ) ->int: """simple docstring""" lowercase__ : Optional[int]= [] embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''', f'''stage{idx}.patch_embed.proj.weight''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''', f'''stage{idx}.patch_embed.proj.bias''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''', f'''stage{idx}.patch_embed.norm.weight''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''', f'''stage{idx}.patch_embed.norm.bias''', ) ) return embed def lowercase__(A , A ) ->Any: """simple docstring""" lowercase__ : Any= [] attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''', ) ) attention_weights.append( ( 
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''', ) ) attention_weights.append( ( 
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''', ) ) attention_weights.append( ( 
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj.bias''', ) ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') ) return attention_weights def lowercase__(A ) ->List[Any]: """simple 
docstring""" lowercase__ : Dict= [] token.append((f'''cvt.encoder.stages.{idx}.cls_token''', "stage2.cls_token") ) return token def lowercase__() ->Union[str, Any]: """simple docstring""" lowercase__ : Dict= [] head.append(("layernorm.weight", "norm.weight") ) head.append(("layernorm.bias", "norm.bias") ) head.append(("classifier.weight", "head.weight") ) head.append(("classifier.bias", "head.bias") ) return head def lowercase__(A , A , A , A ) ->Optional[int]: """simple docstring""" lowercase__ : List[str]= "imagenet-1k-id2label.json" lowercase__ : List[str]= 1_000 lowercase__ : Tuple= "huggingface/label-files" lowercase__ : int= num_labels lowercase__ : int= json.load(open(cached_download(hf_hub_url(A , A , repo_type="dataset" ) ) , "r" ) ) lowercase__ : str= {int(A ): v for k, v in idalabel.items()} lowercase__ : Optional[int]= idalabel lowercase__ : Union[str, Any]= {v: k for k, v in idalabel.items()} lowercase__ : Tuple= CvtConfig(num_labels=A , idalabel=A , labelaid=A ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit("/" , 1 )[-1][4:6] == "13": lowercase__ : int= [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit("/" , 1 )[-1][4:6] == "21": lowercase__ : Union[str, Any]= [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: lowercase__ : Optional[Any]= [2, 2, 20] lowercase__ : Optional[Any]= [3, 12, 16] lowercase__ : List[str]= [192, 768, 1_024] lowercase__ : List[str]= CvtForImageClassification(A ) lowercase__ : Any= AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" ) lowercase__ : Dict= image_size lowercase__ : int= torch.load(A , map_location=torch.device("cpu" ) ) lowercase__ : Optional[Any]= OrderedDict() lowercase__ : Tuple= [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: lowercase__ : Optional[int]= list_of_state_dict + cls_token(A ) lowercase__ : List[str]= list_of_state_dict + embeddings(A ) for cnt in range(config.depth[idx] ): lowercase__ : Dict= 
list_of_state_dict + attention(A , A ) lowercase__ : Optional[Any]= list_of_state_dict + final() for gg in list_of_state_dict: print(A ) for i in range(len(A ) ): lowercase__ : str= original_weights[list_of_state_dict[i][1]] model.load_state_dict(A ) model.save_pretrained(A ) image_processor.save_pretrained(A ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": a : Optional[int] = argparse.ArgumentParser() parser.add_argument( """--cvt_model""", default="""cvt-w24""", type=str, help="""Name of the cvt model you'd like to convert.""", ) parser.add_argument( """--image_size""", default=384, type=int, help="""Input Image Size""", ) parser.add_argument( """--cvt_file_name""", default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""", type=str, help="""Input Image Size""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) a : Optional[int] = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
85
1
"""MVP model configuration.

Defines :class:`MvpConfig`, the configuration class for the MVP
encoder-decoder (BART-style) model.
"""

import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    """Configuration for an MVP encoder-decoder model.

    Stores the hyperparameters used to instantiate the model: vocabulary
    size, encoder/decoder depth and width, dropout rates, and the
    prompt-tuning options (`use_prompt`, `prompt_length`, `prompt_mid_dim`).
    All constructor arguments are persisted as attributes so the config can
    be serialized/deserialized by the base class.
    """

    model_type = "mvp"
    # past_key_values are runtime cache, not part of the serialized config
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # Backward compatibility: old configs used `force_bos_token_to_be_generated`
        # instead of `forced_bos_token_id`.
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
85
"""Unconditional image-generation pipeline using the variance-exploding
(VE) SDE sampler of Song et al., with Langevin corrector steps."""

from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    """Pipeline for unconditional image generation with a score-based
    (VE-SDE) model.

    Parameters:
        unet: score model estimating the gradient of the data distribution.
        scheduler: :class:`ScoreSdeVeScheduler` driving predictor/corrector steps.
    """

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Run the predictor-corrector sampling loop and return images.

        Returns an :class:`ImagePipelineOutput` (or a plain tuple when
        ``return_dict=False``).
        """
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # Start from pure noise scaled to the scheduler's initial sigma.
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step (Langevin dynamics)
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step (reverse-SDE predictor)
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        # Use the noise-free mean of the final step for the returned image.
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
85
1
"""Karras et al. (2022) variance-exploding stochastic sampler scheduler."""

from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
class KarrasVeOutput(BaseOutput):
    """Output of a scheduler step.

    Attributes:
        prev_sample: denoised sample x_{t-1} for the next step.
        derivative: derivative d used by the Heun correction.
        pred_original_sample: predicted clean sample x_0 (optional).
    """

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """Stochastic sampler from "Elucidating the Design Space of
    Diffusion-Based Generative Models" (Karras et al., 2022).

    Implements the churn-based noise injection (`add_noise_to_input`) and the
    two-stage Heun predictor/corrector (`step` / `step_correct`).
    """

    # Heun method: two model evaluations per step.
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values (populated by `set_timesteps`)
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """No input scaling is needed for this scheduler; returns `sample` unchanged."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """Build the decreasing timestep index array and the geometric sigma schedule."""
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        # Geometric interpolation from sigma_max down to sigma_min.
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        """Explicit Langevin-like churn: raise the noise level from sigma to
        sigma_hat = sigma + gamma * sigma when sigma is within [s_min, s_max]."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        """First-order (Euler) prediction from sigma_hat to sigma_prev."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        """Second-order (Heun) correction using the derivative at sigma_prev."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        # Training-time noising is not supported by this inference-only scheduler.
        raise NotImplementedError()
85
"""Exchange sort: repeatedly compare-and-swap each element with every
later element, yielding an ascending in-place sort in O(n^2)."""


def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort `numbers` in place in ascending order and return it.

    Args:
        numbers: list of integers (may be empty or contain duplicates).

    Returns:
        The same list object, sorted ascending.
    """
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            # After the inner loop, numbers[i] holds the minimum of numbers[i:].
            if numbers[j] < numbers[i]:
                numbers[j], numbers[i] = numbers[i], numbers[j]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
85
1
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging a : List[str] = logging.get_logger(__name__) a : Union[str, Any] = { """google/pix2struct-textcaps-base""": ( """https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json""" ), } class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = "pix2struct_text_model" __lowerCamelCase = ["past_key_values"] __lowerCamelCase = { "hidden_size": "hidden_size", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self , snake_case__=50244 , snake_case__=768 , snake_case__=64 , snake_case__=2048 , snake_case__=12 , snake_case__=12 , snake_case__=32 , snake_case__=128 , snake_case__=0.1 , snake_case__=1e-6 , snake_case__=1.0 , snake_case__="gelu_new" , snake_case__=0 , snake_case__=False , snake_case__=0 , snake_case__=1 , snake_case__=False , snake_case__=True , **snake_case__ , ): '''simple docstring''' lowercase__ : int= vocab_size lowercase__ : Optional[Any]= hidden_size lowercase__ : Tuple= d_kv lowercase__ : Optional[int]= d_ff lowercase__ : Any= num_layers lowercase__ : Dict= num_heads lowercase__ : List[Any]= relative_attention_num_buckets lowercase__ : Optional[Any]= relative_attention_max_distance lowercase__ : Dict= dropout_rate lowercase__ : Tuple= layer_norm_epsilon lowercase__ : str= initializer_factor lowercase__ : Any= use_cache lowercase__ : Optional[int]= eos_token_id lowercase__ : str= decoder_start_token_id # for backwards compatibility lowercase__ : Optional[Any]= dense_act_fn super().__init__( pad_token_id=snake_case__ , eos_token_id=snake_case__ , decoder_start_token_id=snake_case__ , tie_word_embeddings=snake_case__ , is_decoder=snake_case__ , **snake_case__ , ) @classmethod def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ): '''simple docstring''' cls._set_token_in_kwargs(snake_case__ ) lowercase__, 
lowercase__ : str= cls.get_config_dict(snake_case__ , **snake_case__ ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get("model_type" ) == "pix2struct": lowercase__ : str= config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(snake_case__ , **snake_case__ ) class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = "pix2struct_vision_model" def __init__( self , snake_case__=768 , snake_case__=768 , snake_case__=2048 , snake_case__=64 , snake_case__=12 , snake_case__=12 , snake_case__="gelu_new" , snake_case__=1e-6 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=1e-10 , snake_case__=1.0 , snake_case__=4096 , snake_case__=32 , snake_case__=128 , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) lowercase__ : Tuple= hidden_size lowercase__ : Tuple= patch_embed_hidden_size lowercase__ : Optional[Any]= d_ff lowercase__ : Dict= dropout_rate lowercase__ : Any= num_hidden_layers lowercase__ : Optional[int]= num_attention_heads lowercase__ : Dict= initializer_range lowercase__ : Tuple= initializer_factor lowercase__ : Tuple= attention_dropout lowercase__ : Optional[Any]= layer_norm_eps lowercase__ : List[Any]= dense_act_fn lowercase__ : str= seq_len lowercase__ : List[str]= relative_attention_num_buckets lowercase__ : Union[str, Any]= relative_attention_max_distance lowercase__ : Dict= d_kv @classmethod def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ): '''simple docstring''' cls._set_token_in_kwargs(snake_case__ ) lowercase__, lowercase__ : int= cls.get_config_dict(snake_case__ , **snake_case__ ) # get the vision config dict if we are loading 
from Pix2StructConfig if config_dict.get("model_type" ) == "pix2struct": lowercase__ : Union[str, Any]= config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(snake_case__ , **snake_case__ ) class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = "pix2struct" __lowerCamelCase = True def __init__( self , snake_case__=None , snake_case__=None , snake_case__=1.0 , snake_case__=0.02 , snake_case__=False , snake_case__=False , snake_case__=True , **snake_case__ , ): '''simple docstring''' super().__init__(tie_word_embeddings=snake_case__ , is_encoder_decoder=snake_case__ , **snake_case__ ) if text_config is None: lowercase__ : List[Any]= {} logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." ) if vision_config is None: lowercase__ : str= {} logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." 
) lowercase__ : str= PixaStructTextConfig(**snake_case__ ) lowercase__ : Dict= PixaStructVisionConfig(**snake_case__ ) lowercase__ : int= self.text_config.decoder_start_token_id lowercase__ : List[Any]= self.text_config.pad_token_id lowercase__ : Any= self.text_config.eos_token_id lowercase__ : Any= initializer_factor lowercase__ : int= initializer_range lowercase__ : List[str]= self.initializer_range lowercase__ : List[str]= self.initializer_range lowercase__ : Dict= is_vqa @classmethod def UpperCAmelCase_ ( cls , snake_case__ , snake_case__ , **snake_case__ ): '''simple docstring''' return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= copy.deepcopy(self.__dict__ ) lowercase__ : str= self.text_config.to_dict() lowercase__ : str= self.vision_config.to_dict() lowercase__ : List[str]= self.__class__.model_type return output
85
"""Project Euler problem 10: sum of all primes below a limit (default two million)."""

import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    """Return True iff `number` is prime (trial division over 6k +/- 1)."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Yield the primes 2, 3, 5, 7, ... indefinitely."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes strictly below `n`."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
85
1
"""simple docstring""" from heapq import heappop, heappush import numpy as np def lowercase__(A , A , A , A , ) ->tuple[float | int, list[tuple[int, int]]]: """simple docstring""" lowercase__, lowercase__ : Dict= grid.shape lowercase__ : Tuple= [-1, 1, 0, 0] lowercase__ : Optional[Any]= [0, 0, -1, 1] if allow_diagonal: dx += [-1, -1, 1, 1] dy += [-1, 1, -1, 1] lowercase__, lowercase__ : str= [(0, source)], set() lowercase__ : List[Any]= np.full((rows, cols) , np.inf ) lowercase__ : Any= 0 lowercase__ : Union[str, Any]= np.empty((rows, cols) , dtype=A ) lowercase__ : str= None while queue: ((lowercase__), (lowercase__)) : Optional[int]= heappop(A ) if (x, y) in visited: continue visited.add((x, y) ) if (x, y) == destination: lowercase__ : Tuple= [] while (x, y) != source: path.append((x, y) ) lowercase__, lowercase__ : List[str]= predecessors[x, y] path.append(A ) # add the source manually path.reverse() return matrix[destination], path for i in range(len(A ) ): lowercase__, lowercase__ : str= x + dx[i], y + dy[i] if 0 <= nx < rows and 0 <= ny < cols: lowercase__ : Tuple= grid[nx][ny] if next_node == 1 and matrix[nx, ny] > dist + 1: heappush(A , (dist + 1, (nx, ny)) ) lowercase__ : str= dist + 1 lowercase__ : List[str]= (x, y) return np.inf, [] if __name__ == "__main__": import doctest doctest.testmod()
85
"""simple docstring""" def lowercase__(A ) ->bool: """simple docstring""" lowercase__ : Tuple= (1 + 24 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def lowercase__(A = 5_000 ) ->int: """simple docstring""" lowercase__ : str= [(i * (3 * i - 1)) // 2 for i in range(1 , A )] for i, pentagonal_i in enumerate(A ): for j in range(A , len(A ) ): lowercase__ : List[Any]= pentagonal_nums[j] lowercase__ : int= pentagonal_i + pentagonal_j lowercase__ : Optional[int]= pentagonal_j - pentagonal_i if is_pentagonal(A ) and is_pentagonal(A ): return b return -1 if __name__ == "__main__": print(F"""{solution() = }""")
85
1
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() a : Optional[int] = logging.get_logger(__name__) a : Union[str, Any] = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } def lowercase__(A , A , A , A , A ) ->int: """simple docstring""" for attribute in key.split("." ): lowercase__ : Union[str, Any]= getattr(A , A ) if weight_type is not None: lowercase__ : Optional[Any]= getattr(A , A ).shape else: lowercase__ : Any= hf_pointer.shape assert hf_shape == value.shape, ( f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowercase__ : List[str]= value elif weight_type == "weight_g": lowercase__ : Tuple= value elif weight_type == "weight_v": lowercase__ : int= value elif weight_type == "bias": lowercase__ : List[str]= value else: lowercase__ : Optional[int]= value logger.info(f'''{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def lowercase__(A , A , A ) ->Any: """simple docstring""" lowercase__ : Tuple= [] lowercase__ : Dict= fairseq_model.state_dict() lowercase__ : int= hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): lowercase__ : str= False if "conv_layers" in name: load_conv_layer( A , A , A , A , hf_model.config.feat_extract_norm == "group" , ) lowercase__ : Union[str, Any]= True else: for key, mapped_key in MAPPING.items(): lowercase__ : Dict= "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key if key in name or (key.split("w2v_model." )[-1] == name.split("." )[0] and not is_finetuned): lowercase__ : Union[str, Any]= True if "*" in mapped_key: lowercase__ : Optional[Any]= name.split(A )[0].split("." )[-2] lowercase__ : Union[str, Any]= mapped_key.replace("*" , A ) if "weight_g" in name: lowercase__ : Dict= "weight_g" elif "weight_v" in name: lowercase__ : Optional[Any]= "weight_v" elif "weight" in name: lowercase__ : Dict= "weight" elif "bias" in name: lowercase__ : str= "bias" else: lowercase__ : str= None set_recursively(A , A , A , A , A ) continue if not is_used: unused_weights.append(A ) logger.warning(f'''Unused weights: {unused_weights}''' ) def lowercase__(A , A , A , A , A ) ->Optional[Any]: """simple docstring""" lowercase__ : List[Any]= full_name.split("conv_layers." )[-1] lowercase__ : int= name.split("." 
) lowercase__ : Union[str, Any]= int(items[0] ) lowercase__ : Any= int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) lowercase__ : Optional[int]= value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) lowercase__ : List[Any]= value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) lowercase__ : List[str]= value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) lowercase__ : str= value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(A ) @torch.no_grad() def lowercase__(A , A , A=None , A=None , A=True ) ->str: """simple docstring""" if config_path is not None: lowercase__ : List[Any]= HubertConfig.from_pretrained(A ) else: lowercase__ : Union[str, Any]= HubertConfig() if is_finetuned: if dict_path: lowercase__ : Tuple= Dictionary.load(A ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowercase__ : Tuple= target_dict.pad_index lowercase__ : List[str]= target_dict.bos_index lowercase__ : Dict= target_dict.eos_index lowercase__ : Tuple= len(target_dict.symbols ) lowercase__ : List[Any]= os.path.join(A , "vocab.json" ) if not os.path.isdir(A ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(A ) ) return os.makedirs(A , exist_ok=A ) with open(A , "w" , encoding="utf-8" ) as vocab_handle: json.dump(target_dict.indices , A ) lowercase__ : List[Any]= WavaVecaCTCTokenizer( A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=A , ) lowercase__ : Union[str, Any]= True if config.feat_extract_norm == "layer" else False lowercase__ : List[Any]= WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=A , return_attention_mask=A , ) lowercase__ : Union[str, Any]= WavaVecaProcessor(feature_extractor=A , tokenizer=A ) 
processor.save_pretrained(A ) lowercase__ : Optional[Any]= HubertForCTC(A ) else: lowercase__ : List[str]= HubertModel(A ) if is_finetuned: lowercase__, lowercase__, lowercase__ : Any= fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: lowercase__, lowercase__, lowercase__ : Union[str, Any]= fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) lowercase__ : List[str]= model[0].eval() recursively_load_weights(A , A , A ) hf_wavavec.save_pretrained(A ) if __name__ == "__main__": a : int = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) a : str = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
85
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging a : List[str] = logging.get_logger(__name__) a : Union[str, Any] = { """google/pix2struct-textcaps-base""": ( """https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json""" ), } class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = "pix2struct_text_model" __lowerCamelCase = ["past_key_values"] __lowerCamelCase = { "hidden_size": "hidden_size", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self , snake_case__=50244 , snake_case__=768 , snake_case__=64 , snake_case__=2048 , snake_case__=12 , snake_case__=12 , snake_case__=32 , snake_case__=128 , snake_case__=0.1 , snake_case__=1e-6 , snake_case__=1.0 , snake_case__="gelu_new" , snake_case__=0 , snake_case__=False , snake_case__=0 , snake_case__=1 , snake_case__=False , snake_case__=True , **snake_case__ , ): '''simple docstring''' lowercase__ : int= vocab_size lowercase__ : Optional[Any]= hidden_size lowercase__ : Tuple= d_kv lowercase__ : Optional[int]= d_ff lowercase__ : Any= num_layers lowercase__ : Dict= num_heads lowercase__ : List[Any]= relative_attention_num_buckets lowercase__ : Optional[Any]= relative_attention_max_distance lowercase__ : Dict= dropout_rate lowercase__ : Tuple= layer_norm_epsilon lowercase__ : str= initializer_factor lowercase__ : Any= use_cache lowercase__ : Optional[int]= eos_token_id lowercase__ : str= decoder_start_token_id # for backwards compatibility lowercase__ : Optional[Any]= dense_act_fn super().__init__( pad_token_id=snake_case__ , eos_token_id=snake_case__ , decoder_start_token_id=snake_case__ , tie_word_embeddings=snake_case__ , is_decoder=snake_case__ , **snake_case__ , ) @classmethod def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ): '''simple docstring''' cls._set_token_in_kwargs(snake_case__ ) lowercase__, 
lowercase__ : str= cls.get_config_dict(snake_case__ , **snake_case__ ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get("model_type" ) == "pix2struct": lowercase__ : str= config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(snake_case__ , **snake_case__ ) class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = "pix2struct_vision_model" def __init__( self , snake_case__=768 , snake_case__=768 , snake_case__=2048 , snake_case__=64 , snake_case__=12 , snake_case__=12 , snake_case__="gelu_new" , snake_case__=1e-6 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=1e-10 , snake_case__=1.0 , snake_case__=4096 , snake_case__=32 , snake_case__=128 , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) lowercase__ : Tuple= hidden_size lowercase__ : Tuple= patch_embed_hidden_size lowercase__ : Optional[Any]= d_ff lowercase__ : Dict= dropout_rate lowercase__ : Any= num_hidden_layers lowercase__ : Optional[int]= num_attention_heads lowercase__ : Dict= initializer_range lowercase__ : Tuple= initializer_factor lowercase__ : Tuple= attention_dropout lowercase__ : Optional[Any]= layer_norm_eps lowercase__ : List[Any]= dense_act_fn lowercase__ : str= seq_len lowercase__ : List[str]= relative_attention_num_buckets lowercase__ : Union[str, Any]= relative_attention_max_distance lowercase__ : Dict= d_kv @classmethod def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ): '''simple docstring''' cls._set_token_in_kwargs(snake_case__ ) lowercase__, lowercase__ : int= cls.get_config_dict(snake_case__ , **snake_case__ ) # get the vision config dict if we are loading 
from Pix2StructConfig if config_dict.get("model_type" ) == "pix2struct": lowercase__ : Union[str, Any]= config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(snake_case__ , **snake_case__ ) class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = "pix2struct" __lowerCamelCase = True def __init__( self , snake_case__=None , snake_case__=None , snake_case__=1.0 , snake_case__=0.02 , snake_case__=False , snake_case__=False , snake_case__=True , **snake_case__ , ): '''simple docstring''' super().__init__(tie_word_embeddings=snake_case__ , is_encoder_decoder=snake_case__ , **snake_case__ ) if text_config is None: lowercase__ : List[Any]= {} logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." ) if vision_config is None: lowercase__ : str= {} logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." 
) lowercase__ : str= PixaStructTextConfig(**snake_case__ ) lowercase__ : Dict= PixaStructVisionConfig(**snake_case__ ) lowercase__ : int= self.text_config.decoder_start_token_id lowercase__ : List[Any]= self.text_config.pad_token_id lowercase__ : Any= self.text_config.eos_token_id lowercase__ : Any= initializer_factor lowercase__ : int= initializer_range lowercase__ : List[str]= self.initializer_range lowercase__ : List[str]= self.initializer_range lowercase__ : Dict= is_vqa @classmethod def UpperCAmelCase_ ( cls , snake_case__ , snake_case__ , **snake_case__ ): '''simple docstring''' return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= copy.deepcopy(self.__dict__ ) lowercase__ : str= self.text_config.to_dict() lowercase__ : str= self.vision_config.to_dict() lowercase__ : List[str]= self.__class__.model_type return output
85
1
"""simple docstring""" import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel a : str = HfApi() a : int = {} # fmt: off a : Union[str, Any] = torch.tensor([ -0.75_15, -1.68_83, 0.24_20, 0.03_00, 0.63_47, 1.34_33, -1.17_43, -3.74_67, 1.23_42, -2.24_85, 0.46_36, 0.80_76, -0.79_91, 0.39_69, 0.84_98, 0.91_89, -1.88_87, -3.35_22, 0.76_39, 0.20_40, 0.62_71, -2.71_48, -1.63_16, 3.08_39, 0.31_86, 0.27_21, -0.97_59, -1.24_61, 2.62_57, 1.35_57 ]) a : Tuple = torch.tensor([ -2.36_39, -2.53_44, 0.00_54, -0.66_74, 1.59_90, 1.01_58, 0.31_24, -2.14_36, 1.87_95, -2.54_29, -0.15_66, -0.39_73, 1.24_90, 2.64_47, 1.22_83, -0.52_08, -2.81_54, -3.51_19, 2.38_38, 1.20_33, 1.72_01, -2.12_56, -1.45_76, 2.79_48, 2.42_04, -0.97_52, -1.25_46, 0.80_27, 3.27_58, 3.13_65 ]) a : List[Any] = torch.tensor([ -0.65_31, -0.68_91, -0.31_72, -0.53_75, -0.91_40, -0.53_67, -0.11_75, -0.78_69, -0.38_08, -0.45_13, -0.20_98, -0.00_83, 0.31_83, 0.51_40, 0.22_47, -0.13_04, -0.13_02, -0.28_02, -0.20_84, -0.20_25, -0.49_67, -0.48_73, -0.08_61, 0.69_25, 0.02_50, 0.12_90, -0.15_43, 0.63_16, 1.04_60, 1.49_43 ]) a : List[Any] = torch.tensor([ 0.09_11, 0.11_07, 0.01_82, 0.04_35, -0.08_05, -0.06_08, 0.03_81, 0.21_72, -0.02_80, 0.13_27, -0.02_99, -0.02_55, -0.00_50, -0.11_70, -0.10_46, 0.03_09, 0.13_67, 0.17_28, -0.05_33, -0.07_48, -0.05_34, 0.16_24, 0.03_84, -0.18_05, -0.07_07, 0.06_42, 0.02_20, -0.01_34, -0.13_33, -0.15_05 ]) a : Any = torch.tensor([ 0.13_21, 0.13_37, 0.04_40, 0.06_22, -0.05_91, -0.03_70, 0.05_03, 0.21_33, -0.01_77, 0.14_15, -0.01_16, -0.01_12, 0.00_44, -0.09_80, -0.07_89, 0.03_95, 0.15_02, 0.17_85, -0.04_88, -0.05_14, -0.04_04, 0.15_39, 0.04_54, -0.15_59, -0.06_65, 0.06_59, 0.03_83, -0.00_05, -0.12_66, -0.13_86 ]) a : Any = torch.tensor([ 0.11_54, 0.12_18, 0.03_07, 0.05_26, -0.07_11, -0.05_41, 0.03_66, 0.20_78, -0.02_67, 0.13_17, -0.02_26, -0.01_93, -0.00_14, -0.10_55, -0.09_02, 0.03_30, 0.13_91, 0.17_09, -0.05_62, -0.06_93, -0.05_60, 0.14_82, 0.03_81, 
-0.16_83, -0.06_81, 0.06_61, 0.03_31, -0.00_46, -0.12_68, -0.14_31 ]) a : Any = torch.tensor([ 0.11_92, 0.12_40, 0.04_14, 0.06_06, -0.05_57, -0.04_12, 0.04_30, 0.20_42, -0.02_00, 0.13_85, -0.01_15, -0.01_32, 0.00_17, -0.09_65, -0.08_02, 0.03_98, 0.14_33, 0.17_47, -0.04_58, -0.05_33, -0.04_07, 0.15_45, 0.04_19, -0.15_74, -0.06_45, 0.06_26, 0.03_41, -0.00_10, -0.11_99, -0.13_90 ]) a : Union[str, Any] = torch.tensor([ 0.10_75, 0.10_74, 0.02_05, 0.04_31, -0.07_74, -0.06_07, 0.02_98, 0.20_42, -0.03_20, 0.12_67, -0.02_81, -0.02_50, -0.00_64, -0.10_91, -0.09_46, 0.02_90, 0.13_28, 0.16_50, -0.05_80, -0.07_38, -0.05_86, 0.14_40, 0.03_37, -0.17_46, -0.07_12, 0.06_05, 0.02_50, -0.00_99, -0.13_16, -0.14_73 ]) a : Dict = torch.tensor([ -1.45_72, -2.04_81, -0.04_14, -0.60_05, 1.41_36, 0.58_48, 0.40_28, -2.73_30, 1.22_12, -2.12_28, 0.21_55, 0.40_39, 0.76_62, 2.05_35, 0.74_77, -0.32_43, -2.17_58, -2.76_48, 1.69_47, 0.70_26, 1.23_38, -1.60_78, -0.86_82, 2.28_10, 1.85_74, -0.57_18, -0.55_86, -0.01_86, 2.34_15, 2.12_51]) a : str = torch.tensor([ -1.36_90, -1.97_20, -0.40_90, -0.69_66, 1.46_60, 0.99_38, -0.13_85, -2.73_24, 0.77_36, -1.89_17, 0.29_23, 0.42_93, 0.16_93, 1.41_12, 1.18_87, -0.31_81, -2.21_60, -2.63_81, 1.31_70, 0.81_63, 0.92_40, -1.65_44, -0.60_99, 2.52_59, 1.64_30, -0.90_90, -0.93_92, -0.01_26, 2.42_68, 2.32_66 ]) a : int = torch.tensor([ -1.35_25, -1.96_28, -0.39_56, -0.68_60, 1.46_64, 1.00_14, -0.12_59, -2.72_12, 0.77_72, -1.88_11, 0.29_96, 0.43_88, 0.17_04, 1.40_29, 1.17_01, -0.30_27, -2.20_53, -2.62_87, 1.33_50, 0.81_31, 0.92_74, -1.62_92, -0.60_98, 2.51_31, 1.65_05, -0.89_58, -0.92_98, -0.01_51, 2.42_57, 2.33_55 ]) a : int = torch.tensor([ -2.05_85, -2.78_97, -0.28_50, -0.89_40, 1.90_52, 0.57_02, 0.63_45, -3.89_59, 1.59_32, -3.23_19, 0.19_74, 0.02_87, 1.75_66, 2.65_43, 0.83_87, -0.53_51, -3.27_36, -4.33_75, 2.90_29, 1.63_90, 1.46_40, -2.17_01, -1.90_13, 2.93_41, 3.49_81, -0.62_55, -1.16_44, -0.15_91, 3.70_97, 3.20_66 ]) a : Dict = torch.tensor([ -2.31_39, -2.55_94, 
-0.01_97, -0.67_85, 1.70_01, 1.16_06, 0.30_75, -2.17_40, 1.80_71, -2.56_30, -0.09_26, -0.38_11, 1.21_16, 2.62_46, 1.27_31, -0.53_98, -2.81_53, -3.61_40, 2.38_93, 1.32_62, 1.62_58, -2.18_56, -1.32_67, 2.83_95, 2.37_79, -1.06_23, -1.24_68, 0.89_59, 3.33_67, 3.22_43 ]) a : Optional[int] = torch.tensor([ -2.06_28, -2.76_67, -0.20_89, -0.82_63, 2.05_39, 0.59_92, 0.64_95, -3.83_36, 1.60_25, -3.28_17, 0.17_21, -0.06_33, 1.75_16, 2.70_39, 0.81_00, -0.59_08, -3.21_13, -4.43_43, 2.92_57, 1.36_32, 1.55_62, -2.14_89, -1.98_94, 3.05_60, 3.33_96, -0.73_28, -1.04_17, 0.03_83, 3.70_93, 3.23_43 ]) a : Dict = torch.tensor([ -1.45_74, -2.05_69, -0.04_73, -0.61_17, 1.40_18, 0.57_69, 0.41_29, -2.73_44, 1.22_41, -2.13_97, 0.20_00, 0.39_37, 0.76_16, 2.04_53, 0.73_24, -0.33_91, -2.17_46, -2.77_44, 1.69_63, 0.69_21, 1.21_87, -1.61_72, -0.88_77, 2.24_39, 1.84_71, -0.58_39, -0.56_05, -0.04_64, 2.32_50, 2.12_19 ]) # fmt: on a : Optional[int] = api.list_models(filter="""diffusers""") for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": a : Union[str, Any] = """/home/patrick/google_checkpoints/""" + mod.modelId.split("""/""")[-1] print(F"""Started running {mod.modelId}!!!""") if mod.modelId.startswith("""CompVis"""): a : Optional[Any] = UNetaDModel.from_pretrained(local_checkpoint, subfolder="""unet""") else: a : Optional[int] = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) a : Optional[Any] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) a : str = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): a : Tuple = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1e-3 ) print(F"""{mod.modelId} has passed successfully!!!""")
85
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : str= TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" ) lowercase__ : str= AutoTokenizer.from_pretrained("google/mt5-small" ) lowercase__ : Tuple= tokenizer("Hello there" , return_tensors="tf" ).input_ids lowercase__ : Optional[Any]= tokenizer("Hi I am" , return_tensors="tf" ).input_ids lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ ).loss lowercase__ : int= -tf.math.reduce_mean(snake_case__ ).numpy() lowercase__ : int= -21.22_81_68 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
85
1
"""simple docstring""" import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a : Optional[int] = logging.get_logger(__name__) a : Dict = {"""vocab_file""": """vocab.txt"""} a : Optional[int] = { """vocab_file""": { """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""", }, } a : Tuple = { """openbmb/cpm-ant-10b""": 1024, } def lowercase__(A ) ->Tuple: """simple docstring""" lowercase__ : Optional[Any]= collections.OrderedDict() with open(A , "r" , encoding="utf-8" ) as reader: lowercase__ : str= reader.readlines() for index, token in enumerate(A ): lowercase__ : Dict= token.rstrip("\n" ) lowercase__ : Tuple= index return vocab class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__( self , snake_case__ , snake_case__="<unk>" , snake_case__=200 ): '''simple docstring''' lowercase__ : Dict= vocab lowercase__ : Optional[int]= unk_token lowercase__ : int= max_input_chars_per_word def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__ : str= list(snake_case__ ) if len(snake_case__ ) > self.max_input_chars_per_word: return [self.unk_token] lowercase__ : Union[str, Any]= 0 lowercase__ : Union[str, Any]= [] while start < len(snake_case__ ): lowercase__ : Dict= len(snake_case__ ) lowercase__ : Optional[int]= None while start < end: lowercase__ : List[str]= "".join(chars[start:end] ) if substr in self.vocab: lowercase__ : Union[str, Any]= substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(snake_case__ ) lowercase__ : Tuple= end return sub_tokens class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = VOCAB_FILES_NAMES __lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase 
= PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase = ["input_ids", "attention_mask"] __lowerCamelCase = False def __init__( self , snake_case__ , snake_case__="<d>" , snake_case__="</d>" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="<pad>" , snake_case__="<unk>" , snake_case__="</n>" , snake_case__="</_>" , snake_case__="left" , **snake_case__ , ): '''simple docstring''' requires_backends(self , ["jieba"] ) super().__init__( bod_token=snake_case__ , eod_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , pad_token=snake_case__ , unk_token=snake_case__ , line_token=snake_case__ , space_token=snake_case__ , padding_side=snake_case__ , **snake_case__ , ) lowercase__ : Dict= bod_token lowercase__ : int= eod_token lowercase__ : Optional[int]= load_vocab(snake_case__ ) lowercase__ : Optional[Any]= self.encoder[space_token] lowercase__ : List[Any]= self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] lowercase__ : str= collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) ) lowercase__ : Union[str, Any]= {v: k for k, v in self.encoder.items()} lowercase__ : List[Any]= WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def UpperCAmelCase_ ( self ): '''simple docstring''' return self.encoder[self.bod_token] @property def UpperCAmelCase_ ( self ): '''simple docstring''' return self.encoder[self.eod_token] @property def UpperCAmelCase_ ( self ): '''simple docstring''' return self.encoder["\n"] @property def UpperCAmelCase_ ( self ): '''simple docstring''' return len(self.encoder ) def UpperCAmelCase_ ( self ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__ : Any= [] for x in jieba.cut(snake_case__ , cut_all=snake_case__ ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(snake_case__ ) ) return output_tokens def UpperCAmelCase_ ( 
self , snake_case__ , **snake_case__ ): '''simple docstring''' lowercase__ : int= [i for i in token_ids if i >= 0] lowercase__ : List[Any]= [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(snake_case__ , **snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' return token in self.encoder def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' return "".join(snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) ) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' return self.decoder.get(snake_case__ , self.unk_token ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if os.path.isdir(snake_case__ ): lowercase__ : List[str]= os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: lowercase__ : int= (filename_prefix + "-" if filename_prefix else "") + save_directory lowercase__ : Optional[Any]= 0 if " " in self.encoder: lowercase__ : int= self.encoder[" "] del self.encoder[" "] if "\n" in self.encoder: lowercase__ : Any= self.encoder["\n"] del self.encoder["\n"] lowercase__ : Union[str, Any]= collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) ) with open(snake_case__ , "w" , encoding="utf-8" ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' " Please check that the vocabulary is not corrupted!" 
) lowercase__ : Any= token_index writer.write(token + "\n" ) index += 1 return (vocab_file,) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None , snake_case__ = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ ) if token_ids_a is not None: return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) return [1] + ([0] * len(snake_case__ ))
85
"""simple docstring""" from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = ["image_processor", "tokenizer"] __lowerCamelCase = "BridgeTowerImageProcessor" __lowerCamelCase = ("RobertaTokenizer", "RobertaTokenizerFast") def __init__( self , snake_case__ , snake_case__ ): '''simple docstring''' super().__init__(snake_case__ , snake_case__ ) def __call__( self , snake_case__ , snake_case__ = None , snake_case__ = True , snake_case__ = False , snake_case__ = None , snake_case__ = None , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = True , snake_case__ = None , **snake_case__ , ): '''simple docstring''' lowercase__ : Optional[int]= self.tokenizer( text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_token_type_ids=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , ) # add pixel_values + pixel_mask lowercase__ : Optional[int]= self.image_processor( snake_case__ , return_tensors=snake_case__ , do_normalize=snake_case__ , do_center_crop=snake_case__ , **snake_case__ ) encoding.update(snake_case__ ) return encoding def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ ) def UpperCAmelCase_ ( self , 
*snake_case__ , **snake_case__ ): '''simple docstring''' return self.tokenizer.decode(*snake_case__ , **snake_case__ ) @property def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.tokenizer.model_input_names lowercase__ : List[Any]= self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
85
1
"""simple docstring""" from __future__ import annotations from decimal import Decimal from numpy import array def lowercase__(A ) ->list[list[float]]: """simple docstring""" lowercase__ : List[str]= Decimal # Check if the provided matrix has 2 rows and 2 columns # since this implementation only works for 2x2 matrices if len(A ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2: # Calculate the determinant of the matrix lowercase__ : Union[str, Any]= float( d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) ) if determinant == 0: raise ValueError("This matrix has no inverse." ) # Creates a copy of the matrix with swapped positions of the elements lowercase__ : Any= [[0.0, 0.0], [0.0, 0.0]] lowercase__, lowercase__ : List[str]= matrix[1][1], matrix[0][0] lowercase__, lowercase__ : Union[str, Any]= -matrix[1][0], -matrix[0][1] # Calculate the inverse of the matrix return [ [(float(d(A ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix ] elif ( len(A ) == 3 and len(matrix[0] ) == 3 and len(matrix[1] ) == 3 and len(matrix[2] ) == 3 ): # Calculate the determinant of the matrix using Sarrus rule lowercase__ : Any= float( ( (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] )) + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] )) + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] )) ) - ( (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] )) + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] )) + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] )) ) ) if determinant == 0: raise ValueError("This matrix has no inverse." 
) # Creating cofactor matrix lowercase__ : Tuple= [ [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], ] lowercase__ : int= (d(matrix[1][1] ) * d(matrix[2][2] )) - ( d(matrix[1][2] ) * d(matrix[2][1] ) ) lowercase__ : str= -( (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] )) ) lowercase__ : Optional[Any]= (d(matrix[1][0] ) * d(matrix[2][1] )) - ( d(matrix[1][1] ) * d(matrix[2][0] ) ) lowercase__ : Dict= -( (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] )) ) lowercase__ : Any= (d(matrix[0][0] ) * d(matrix[2][2] )) - ( d(matrix[0][2] ) * d(matrix[2][0] ) ) lowercase__ : str= -( (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] )) ) lowercase__ : Tuple= (d(matrix[0][1] ) * d(matrix[1][2] )) - ( d(matrix[0][2] ) * d(matrix[1][1] ) ) lowercase__ : Dict= -( (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] )) ) lowercase__ : Tuple= (d(matrix[0][0] ) * d(matrix[1][1] )) - ( d(matrix[0][1] ) * d(matrix[1][0] ) ) # Transpose the cofactor matrix (Adjoint matrix) lowercase__ : Optional[Any]= array(A ) for i in range(3 ): for j in range(3 ): lowercase__ : Union[str, Any]= cofactor_matrix[j][i] # Inverse of the matrix using the formula (1/determinant) * adjoint matrix lowercase__ : Union[str, Any]= array(A ) for i in range(3 ): for j in range(3 ): inverse_matrix[i][j] /= d(A ) # Calculate the inverse of the matrix return [[float(d(A ) ) or 0.0 for n in row] for row in inverse_matrix] raise ValueError("Please provide a matrix of size 2x2 or 3x3." )
85
"""simple docstring""" import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= tempfile.mkdtemp() lowercase__ : Optional[Any]= 8 # DPR tok lowercase__ : Tuple= [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] lowercase__ : Any= os.path.join(self.tmpdirname , "dpr_tokenizer" ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) lowercase__ : Any= os.path.join(snake_case__ , DPR_VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) # BART tok lowercase__ : List[Any]= [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] lowercase__ : Tuple= 
dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) lowercase__ : Any= ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] lowercase__ : Tuple= {"unk_token": "<unk>"} lowercase__ : int= os.path.join(self.tmpdirname , "bart_tokenizer" ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) lowercase__ : List[str]= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["vocab_file"] ) lowercase__ : str= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(snake_case__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(snake_case__ ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= self.get_dummy_dataset() lowercase__ : Optional[Any]= RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset: lowercase__ : Tuple= dataset 
lowercase__ : Optional[int]= RagRetriever( snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__ : Dict= self.get_dummy_dataset() lowercase__ : Tuple= RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , ) if from_disk: lowercase__ : Tuple= os.path.join(self.tmpdirname , "dataset" ) lowercase__ : Optional[Any]= os.path.join(self.tmpdirname , "index.faiss" ) dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) ) dataset.drop_index("embeddings" ) dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) ) del dataset lowercase__ : List[Any]= RagRetriever( snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: lowercase__ : Optional[int]= RagRetriever( snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , snake_case__ ) , ) return retriever def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT ) lowercase__ : Optional[int]= os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" ) dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" ) pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) ) lowercase__ : int= os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" ) lowercase__ : str= {sample["id"]: 
[sample["text"], sample["title"]] for sample in dataset} pickle.dump(snake_case__ , open(snake_case__ , "wb" ) ) lowercase__ : List[Any]= RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , ) lowercase__ : Optional[Any]= RagRetriever( snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= 1 lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever() lowercase__ : Union[str, Any]= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , n_docs=snake_case__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset: lowercase__ : Tuple= self.get_dummy_dataset() retriever.save_pretrained(snake_case__ ) lowercase__ : int= RagRetriever.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) lowercase__ : Any= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , 
dtype=np.floataa ) lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 ) self.assertTrue(out is not None ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[Any]= 1 lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) lowercase__ : Union[str, Any]= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__, lowercase__, lowercase__ : Any= retriever.retrieve(snake_case__ , n_docs=snake_case__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(snake_case__ ) lowercase__ : int= RagRetriever.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) lowercase__ : Tuple= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : str= retriever.retrieve(snake_case__ , n_docs=1 ) self.assertTrue(out is not None ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= 1 lowercase__ : str= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) lowercase__ : List[str]= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , 
n_docs=snake_case__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(snake_case__ ) lowercase__ : Optional[Any]= RagRetriever.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) lowercase__ : int= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : Union[str, Any]= retriever.retrieve(snake_case__ , n_docs=1 ) self.assertTrue(out is not None ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= 1 lowercase__ : int= self.get_dummy_legacy_index_retriever() lowercase__ : Optional[Any]= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__, lowercase__, lowercase__ : Optional[Any]= retriever.retrieve(snake_case__ , n_docs=snake_case__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] ) self.assertEqual(len(doc_dicts[0]["text"] ) , snake_case__ ) self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , 
[[1], [0]] ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(snake_case__ ) lowercase__ : List[Any]= RagRetriever.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) lowercase__ : str= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def UpperCAmelCase_ ( self ): '''simple docstring''' import torch lowercase__ : str= 1 lowercase__ : Union[str, Any]= self.get_dummy_canonical_hf_index_retriever() lowercase__ : str= [[5, 7], [10, 11]] lowercase__ : List[str]= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : Dict= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ ) lowercase__, lowercase__, lowercase__ : Optional[int]= ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(snake_case__ , snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) self.assertIsInstance(snake_case__ , np.ndarray ) lowercase__ : Any= retriever( snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ , return_tensors="pt" , ) lowercase__, lowercase__, lowercase__, lowercase__ : Tuple= ( # noqa: F841 out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], out["doc_ids"], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(snake_case__ , torch.Tensor ) self.assertIsInstance(snake_case__ , torch.Tensor 
) self.assertIsInstance(snake_case__ , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= self.get_dpr_ctx_encoder_tokenizer() lowercase__ : Dict= 1 lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) retriever.set_ctx_encoder_tokenizer(snake_case__ ) lowercase__ : List[str]= [[5, 7], [10, 11]] lowercase__ : Any= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : List[Any]= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ ) self.assertEqual( len(snake_case__ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , snake_case__ ) # check for doc token related keys in dictionary.
85
1
"""simple docstring""" import json import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from transformers import OneFormerImageProcessor from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput if is_vision_available(): from PIL import Image def lowercase__(A , A="shi-labs/oneformer_demo" ) ->Any: """simple docstring""" with open(hf_hub_download(A , A , repo_type="dataset" ) , "r" ) as f: lowercase__ : List[str]= json.load(A ) lowercase__ : str= {} lowercase__ : int= [] lowercase__ : Optional[int]= [] for key, info in class_info.items(): lowercase__ : List[str]= info["name"] class_names.append(info["name"] ) if info["isthing"]: thing_ids.append(int(A ) ) lowercase__ : Union[str, Any]= thing_ids lowercase__ : List[Any]= class_names return metadata class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" def __init__( self , snake_case__ , snake_case__=7 , snake_case__=3 , snake_case__=30 , snake_case__=400 , snake_case__=None , snake_case__=True , snake_case__=True , snake_case__=[0.5, 0.5, 0.5] , snake_case__=[0.5, 0.5, 0.5] , snake_case__=10 , snake_case__=False , snake_case__=255 , snake_case__="shi-labs/oneformer_demo" , snake_case__="ade20k_panoptic.json" , snake_case__=10 , ): '''simple docstring''' lowercase__ : Tuple= parent lowercase__ : List[Any]= batch_size lowercase__ : Tuple= num_channels lowercase__ : Optional[int]= min_resolution lowercase__ : List[str]= max_resolution lowercase__ : str= do_resize lowercase__ : Any= {"shortest_edge": 32, "longest_edge": 1333} if size is None else size 
lowercase__ : int= do_normalize lowercase__ : Any= image_mean lowercase__ : Union[str, Any]= image_std lowercase__ : str= class_info_file lowercase__ : Tuple= prepare_metadata(snake_case__ , snake_case__ ) lowercase__ : Optional[Any]= num_text lowercase__ : Union[str, Any]= repo_path # for the post_process_functions lowercase__ : str= 2 lowercase__ : str= 10 lowercase__ : Union[str, Any]= 10 lowercase__ : Optional[int]= 3 lowercase__ : Any= 4 lowercase__ : str= num_labels lowercase__ : str= do_reduce_labels lowercase__ : Any= ignore_index def UpperCAmelCase_ ( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "num_labels": self.num_labels, "do_reduce_labels": self.do_reduce_labels, "ignore_index": self.ignore_index, "class_info_file": self.class_info_file, "metadata": self.metadata, "num_text": self.num_text, } def UpperCAmelCase_ ( self , snake_case__ , snake_case__=False ): '''simple docstring''' if not batched: lowercase__ : int= image_inputs[0] if isinstance(snake_case__ , Image.Image ): lowercase__, lowercase__ : Tuple= image.size else: lowercase__, lowercase__ : Dict= image.shape[1], image.shape[2] if w < h: lowercase__ : List[str]= int(self.size["shortest_edge"] * h / w ) lowercase__ : Any= self.size["shortest_edge"] elif w > h: lowercase__ : Optional[Any]= self.size["shortest_edge"] lowercase__ : int= int(self.size["shortest_edge"] * w / h ) else: lowercase__ : List[Any]= self.size["shortest_edge"] lowercase__ : List[str]= self.size["shortest_edge"] else: lowercase__ : Optional[int]= [] for image in image_inputs: lowercase__, lowercase__ : str= self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowercase__ : Union[str, Any]= max(snake_case__ , key=lambda snake_case__ : item[0] )[0] lowercase__ : Union[str, Any]= max(snake_case__ , key=lambda snake_case__ : item[1] )[1] return 
expected_height, expected_width def UpperCAmelCase_ ( self ): '''simple docstring''' return OneFormerForUniversalSegmentationOutput( # +1 for null class class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , ) @require_torch @require_vision class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" __lowerCamelCase = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None # only for test_image_processing_common.test_image_proc_to_json_string __lowerCamelCase = image_processing_class def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= OneFormerImageProcessorTester(self ) @property def UpperCAmelCase_ ( self ): '''simple docstring''' return self.image_processing_tester.prepare_image_processor_dict() def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case__ , "image_mean" ) ) self.assertTrue(hasattr(snake_case__ , "image_std" ) ) self.assertTrue(hasattr(snake_case__ , "do_normalize" ) ) self.assertTrue(hasattr(snake_case__ , "do_resize" ) ) self.assertTrue(hasattr(snake_case__ , "size" ) ) self.assertTrue(hasattr(snake_case__ , "ignore_index" ) ) self.assertTrue(hasattr(snake_case__ , "class_info_file" ) ) self.assertTrue(hasattr(snake_case__ , "num_text" ) ) self.assertTrue(hasattr(snake_case__ , "repo_path" ) ) self.assertTrue(hasattr(snake_case__ , "metadata" ) ) self.assertTrue(hasattr(snake_case__ , "do_reduce_labels" ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' pass def UpperCAmelCase_ ( self ): '''simple docstring''' # Initialize image_processor lowercase__ : Union[str, Any]= self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ : str= 
prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input lowercase__ : List[Any]= image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values lowercase__, lowercase__ : str= self.image_processing_tester.get_expected_values(snake_case__ ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase__, lowercase__ : List[str]= self.image_processing_tester.get_expected_values(snake_case__ , batched=snake_case__ ) lowercase__ : str= image_processor( snake_case__ , ["semantic"] * len(snake_case__ ) , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase_ ( self ): '''simple docstring''' # Initialize image_processor lowercase__ : Optional[int]= self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase__ : Any= prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case__ , numpify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , np.ndarray ) # Test not batched input lowercase__ : int= image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values lowercase__, lowercase__ : Optional[Any]= self.image_processing_tester.get_expected_values(snake_case__ ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase__, lowercase__ : Dict= self.image_processing_tester.get_expected_values(snake_case__ , batched=snake_case__ ) lowercase__ : List[str]= image_processor( snake_case__ , ["semantic"] * len(snake_case__ ) , return_tensors="pt" ).pixel_values self.assertEqual( 
encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase_ ( self ): '''simple docstring''' # Initialize image_processor lowercase__ : Optional[Any]= self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase__ : str= prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case__ , torchify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , torch.Tensor ) # Test not batched input lowercase__ : Tuple= image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values lowercase__, lowercase__ : Optional[Any]= self.image_processing_tester.get_expected_values(snake_case__ ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase__, lowercase__ : int= self.image_processing_tester.get_expected_values(snake_case__ , batched=snake_case__ ) lowercase__ : Optional[int]= image_processor( snake_case__ , ["semantic"] * len(snake_case__ ) , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase_ ( self , snake_case__=False , snake_case__=False , snake_case__="np" ): '''simple docstring''' lowercase__ : int= self.image_processing_class(**self.image_processor_dict ) # prepare image and target lowercase__ : str= self.image_processing_tester.num_labels lowercase__ : int= None lowercase__ : List[Any]= None lowercase__ : List[str]= prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case__ ) if with_segmentation_maps: lowercase__ : Dict= num_labels if is_instance_map: lowercase__ : Tuple= list(range(snake_case__ ) ) * 2 lowercase__ : Tuple= dict(enumerate(snake_case__ ) ) lowercase__ : Dict= [ 
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs ] if segmentation_type == "pil": lowercase__ : int= [Image.fromarray(snake_case__ ) for annotation in annotations] lowercase__ : int= image_processor( snake_case__ , ["semantic"] * len(snake_case__ ) , snake_case__ , return_tensors="pt" , instance_id_to_semantic_id=snake_case__ , pad_and_return_pixel_mask=snake_case__ , ) return inputs def UpperCAmelCase_ ( self ): '''simple docstring''' pass def UpperCAmelCase_ ( self ): '''simple docstring''' def common(snake_case__=False , snake_case__=None ): lowercase__ : Tuple= self.comm_get_image_processor_inputs( with_segmentation_maps=snake_case__ , is_instance_map=snake_case__ , segmentation_type=snake_case__ ) lowercase__ : Dict= inputs["mask_labels"] lowercase__ : List[str]= inputs["class_labels"] lowercase__ : Optional[Any]= inputs["pixel_values"] lowercase__ : Dict= inputs["text_inputs"] # check the batch_size for mask_label, class_label, text_input in zip(snake_case__ , snake_case__ , snake_case__ ): self.assertEqual(mask_label.shape[0] , class_label.shape[0] ) # this ensure padding has happened self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] ) self.assertEqual(len(snake_case__ ) , self.image_processing_tester.num_text ) common() common(is_instance_map=snake_case__ ) common(is_instance_map=snake_case__ , segmentation_type="pil" ) common(is_instance_map=snake_case__ , segmentation_type="pil" ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= np.zeros((20, 50) ) lowercase__ : int= 1 lowercase__ : Optional[Any]= 1 lowercase__ : str= 1 lowercase__ : int= binary_mask_to_rle(snake_case__ ) self.assertEqual(len(snake_case__ ) , 4 ) self.assertEqual(rle[0] , 21 ) self.assertEqual(rle[1] , 45 ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , 
task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , ) lowercase__ : Dict= self.image_processing_tester.get_fake_oneformer_outputs() lowercase__ : Tuple= fature_extractor.post_process_semantic_segmentation(snake_case__ ) self.assertEqual(len(snake_case__ ) , self.image_processing_tester.batch_size ) self.assertEqual( segmentation[0].shape , ( self.image_processing_tester.height, self.image_processing_tester.width, ) , ) lowercase__ : Dict= [(1, 4) for i in range(self.image_processing_tester.batch_size )] lowercase__ : Tuple= fature_extractor.post_process_semantic_segmentation(snake_case__ , target_sizes=snake_case__ ) self.assertEqual(segmentation[0].shape , target_sizes[0] ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , ) lowercase__ : List[Any]= self.image_processing_tester.get_fake_oneformer_outputs() lowercase__ : List[str]= image_processor.post_process_instance_segmentation(snake_case__ , threshold=0 ) self.assertTrue(len(snake_case__ ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue("segmentation" in el ) self.assertTrue("segments_info" in el ) self.assertEqual(type(el["segments_info"] ) , snake_case__ ) self.assertEqual( el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , 
repo_path="shi-labs/oneformer_demo" , ) lowercase__ : int= self.image_processing_tester.get_fake_oneformer_outputs() lowercase__ : Union[str, Any]= image_processor.post_process_panoptic_segmentation(snake_case__ , threshold=0 ) self.assertTrue(len(snake_case__ ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue("segmentation" in el ) self.assertTrue("segments_info" in el ) self.assertEqual(type(el["segments_info"] ) , snake_case__ ) self.assertEqual( el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
85
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = ["image_processor", "tokenizer"] __lowerCamelCase = "AutoImageProcessor" __lowerCamelCase = "AutoTokenizer" def __init__( self , snake_case__ , snake_case__ ): '''simple docstring''' super().__init__(snake_case__ , snake_case__ ) lowercase__ : List[Any]= self.image_processor def __call__( self , snake_case__=None , snake_case__=None , snake_case__=None , **snake_case__ ): '''simple docstring''' if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none." ) if text is not None: lowercase__ : Tuple= self.tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ ) if images is not None: lowercase__ : str= self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ ) if text is not None and images is not None: lowercase__ : Any= image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ ) def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ ) def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' return self.tokenizer.decode(*snake_case__ , **snake_case__ ) @property def UpperCAmelCase_ ( self ): '''simple docstring''' return ["input_ids", "attention_mask", "pixel_values"]
85
1
"""Recursive bubble sort."""


def bubble_sort(list_data: list, length: int = 0) -> list:
    """Sort `list_data` in place with a recursive bubble sort and return it.

    Args:
        list_data: mutable list of mutually comparable items (sorted in place).
        length: number of leading items still unsorted; 0 means the whole list.

    >>> bubble_sort([0, 5, 2, 3, 2])
    [0, 2, 2, 3, 5]
    >>> bubble_sort([])
    []
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            # BUG FIX: the original assigned both values to a single local
            # name instead of swapping the two list elements, so the list was
            # never modified; swap the elements themselves.
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    # A full pass with no swap means the list is sorted; otherwise the largest
    # element has bubbled to position `length - 1`, so recurse on the prefix.
    return list_data if not swapped else bubble_sort(list_data, length - 1)


# Backward-compatible alias: the original module exposed the function as
# `lowercase__` while its own body called `bubble_sort`.
lowercase__ = bubble_sort


if __name__ == "__main__":
    import doctest

    doctest.testmod()
85
"""Pure-Python Base64 encoding/decoding using the RFC 4648 alphabet."""

B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
# Backward-compatible alias: the original module bound the charset to `a`
# while the functions referenced `B64_CHARSET`.
a = B64_CHARSET


def base64_encode(data: bytes) -> bytes:
    """Encode a bytes-like object to Base64.

    Raises TypeError when `data` is not bytes.

    >>> base64_encode(b"Hello World!")
    b'SGVsbG8gV29ybGQh'
    """
    if not isinstance(data, bytes):
        raise TypeError(f"a bytes-like object is required, not '{data.__class__.__name__}'")

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # One '=' per 2 missing bits of the final 6-bit group.
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append arbitrary binary digits (0's by default) to make the stream's
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character.
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data) -> bytes:
    """Decode a Base64 string or bytes-like object back to bytes.

    Raises TypeError for non-str/bytes input and ValueError for non-ASCII
    bytes input; asserts on invalid characters or incorrect padding.

    >>> base64_decode("SGVsbG8gV29ybGQh")
    b'Hello World!'
    """
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        raise TypeError(
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we can convert it to a string object.
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters.
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding.
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove the padding and the filler bits it stands for.
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    decoded = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded)


# Backward-compatible alias: in the original both functions were named
# `lowercase__`, so the surviving module-level name resolved to the decoder.
lowercase__ = base64_decode


if __name__ == "__main__":
    import doctest

    doctest.testmod()
85
1
"""Build and simulate an n-qubit Quantum Fourier Transform circuit (Qiskit)."""
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits=3) -> qiskit.result.counts.Counts:
    """Return measurement counts of an n-qubit QFT simulated for 10000 shots.

    Raises:
        TypeError: if `number_of_qubits` is a string.
        ValueError: if it is <= 0, non-integral, or larger than 10.
    """
    # BUG FIX: the original called isinstance(number_of_qubits,
    # number_of_qubits), which can never perform the intended type check.
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits
    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            # Controlled phase rotation of pi / 2^(counter - j).
            # NOTE(review): control/target pair restored from the reference
            # QFT construction — confirm against upstream.
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    # Reverse the qubit order with swaps, as required by the QFT definition.
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)
    return job.result().get_counts(quantum_circuit)


# Backward-compatible alias: the original def was renamed `lowercase__` while
# the __main__ guard still called `quantum_fourier_transform`.
lowercase__ = quantum_fourier_transform


if __name__ == "__main__":
    print(
        f"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
    )
85
"""Longest non-decreasing subsequence via recursive pivoting."""
from __future__ import annotations


def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return the longest non-decreasing subsequence of `array`.

    NOTE(review): every local in the original was clobbered into one name, so
    `array_length`, `is_found`, `i` and the recursive call target were
    undefined; the algorithm's original identifiers are restored here.

    >>> longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
    [10, 22, 33, 41, 60, 80]
    """
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop
    # condition of recursion).
    if array_length <= 1:
        return array

    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    # The best subsequence may start at the first element smaller than the
    # pivot instead of at the pivot itself — try that branch recursively.
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    # Candidate that keeps the pivot followed by everything >= it.
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq


# Backward-compatible alias for the mangled module-level name.
lowercase__ = longest_subsequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
85
1
"""FLAVA processor: combines a FLAVA image processor and a BERT tokenizer."""
import warnings
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class __UpperCAmelCase(ProcessorMixin):
    """Wraps a FLAVA image processor and a BERT tokenizer into one processor.

    NOTE(review): the original block inherited from the undefined name
    ``SCREAMING_SNAKE_CASE__``, bound every local to one clobbered name
    (`encoding`/`image_features`/`feature_extractor` were referenced but never
    defined), and gave every method the same name; the ``ProcessorMixin``
    contract names are restored here.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Fall back to the deprecated `feature_extractor` kwarg when no
        # image processor is given explicitly.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        """Tokenize `text` and/or preprocess `images`; merge both when given.

        Raises ValueError when neither `text` nor `images` is provided.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Deduplicated union of tokenizer and image-processor input names,
        # preserving order.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
85
"""Convert a txt2img unCLIP (Karlo) checkpoint into an image-variation pipeline."""
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    # BUG FIX: the script declared --txt2img_unclip but read
    # `args.txtaimg_unclip`, which raises AttributeError at runtime; read the
    # declared destination instead.
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    # Reuse every component of the txt2img pipeline except the prior, adding
    # the CLIP image encoder needed for image variations.
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
85
1
"""Sandboxed execution of untrusted candidate programs.

This code is adapted from OpenAI's release
https://github.com/openai/human-eval/blob/master/human_eval/execution.py

NOTE(review): the mangled original referenced `unsafe_execute`,
`TimeoutException`, `WriteOnlyStringIO`, `redirect_stdin`, `chdir`,
`create_tempdir`, `reliability_guard` and the save/restore locals without
defining them (every name was clobbered); the upstream identifiers are
restored throughout.
"""
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile


def check_correctness(check_program, timeout, task_id, completion_id):
    """Run `check_program` in a subprocess with a hard wall-clock timeout.

    Returns a dict with keys task_id / passed / result / completion_id.
    `result` is "passed", "timed out", or "failed: <exception>".
    """
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    # Give the child one extra second beyond its internal SIGALRM budget.
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()
    if not result:
        result.append("timed out")
    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }


def unsafe_execute(check_program, result, timeout):
    """exec() the program in a temp dir with destructive APIs disabled.

    Must run in a throwaway subprocess: reliability_guard() mutates global
    modules irreversibly.
    """
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up: restore the calls TemporaryDirectory uses.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir


@contextlib.contextmanager
def time_limit(seconds):
    """Raise TimeoutException after `seconds` of wall time (SIGALRM-based)."""

    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        # Always disarm the timer, even when the body raised.
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    """Silence stdout/stderr and block stdin for the duration."""
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    """chdir into a fresh temporary directory for the duration."""
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname


class TimeoutException(Exception):
    """Raised by time_limit when the timer fires."""


class WriteOnlyStringIO(io.StringIO):
    """StringIO that raises on any read, so sandboxed code cannot read stdin."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"


@contextlib.contextmanager
def chdir(root):
    """Temporarily change the working directory to `root` ('.' is a no-op)."""
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)


def reliability_guard(maximum_memory_bytes=None):
    """Disable destructive APIs before exec()ing untrusted code.

    WARNING: this is NOT a security sandbox — untrusted code can still break
    out; always combine with process isolation (see check_correctness).
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        # RLIMIT_STACK cannot be raised/lowered this way on macOS.
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None  # duplicated upstream; kept for parity
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
85
"""simple docstring""" import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, 
load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() a : Optional[Any] = { """bart""": ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), """bert""": ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-uncased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-cased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-base-cased-finetuned-mrpc""": ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """dpr""": ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), """gpt2""": ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlnet""": ( XLNetConfig, 
TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm""": ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm-roberta""": ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """transfo-xl""": ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """openai-gpt""": ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """roberta""": ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """layoutlm""": ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), """roberta-large-mnli""": ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """camembert""": ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """flaubert""": ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert""": ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert-base-distilled-squad""": ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert""": ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert-visual-feature-encoder""": ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """ctrl""": ( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """albert""": ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, 
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """t5""": ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """electra""": ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """wav2vec2""": ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def lowercase__(A , A , A , A , A=False , A=True ) ->Union[str, Any]: """simple docstring""" if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase__, lowercase__, lowercase__, lowercase__ : List[Any]= MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models ) lowercase__ : List[Any]= config_class.from_json_file(A ) lowercase__ : Any= True lowercase__ : List[str]= True print(f'''Building TensorFlow model from configuration: {config}''' ) lowercase__ : Optional[int]= model_class(A ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): lowercase__ : List[str]= cached_file( A , A , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: lowercase__ : Union[str, Any]= load_pytorch_checkpoint_in_tfa_model(A , A ) if compare_with_pt_model: lowercase__ : Any= tf_model(tf_model.dummy_inputs , training=A ) # build the network lowercase__ : Optional[Any]= torch.load(A , map_location="cpu" ) lowercase__ : Union[str, Any]= pt_model_class.from_pretrained( pretrained_model_name_or_path=A , config=A , state_dict=A ) with torch.no_grad(): lowercase__ : str= pt_model(**pt_model.dummy_inputs ) lowercase__ : Tuple= pto[0].numpy() lowercase__ : List[Any]= tfo[0].numpy() lowercase__ : Any= np.amax(np.abs(np_pt - np_tf ) ) print(f'''Max absolute difference between models outputs {diff}''' ) assert diff <= 2e-2, f'''Error, model absolute difference is 
>2e-2: {diff}''' # Save pytorch-model print(f'''Save TensorFlow model to {tf_dump_path}''' ) tf_model.save_weights(A , save_format="h5" ) def lowercase__(A , A , A=None , A=None , A=False , A=False , A=False , A=False , ) ->List[Any]: """simple docstring""" if args_model_type is None: lowercase__ : Tuple= list(MODEL_CLASSES.keys() ) else: lowercase__ : Optional[int]= [args_model_type] for j, model_type in enumerate(A , start=1 ): print("=" * 100 ) print(f''' Converting model type {j}/{len(A )}: {model_type}''' ) print("=" * 100 ) if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ : Optional[int]= MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: lowercase__ : int= list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: lowercase__ : Any= model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(A , A ) , start=1 ): print("-" * 100 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' ) continue lowercase__ : Any= model_shortcut_name elif only_convert_finetuned_models: print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''' ) continue print( f''' Converting checkpoint {i}/{len(A )}: {model_shortcut_name} - model_type {model_type}''' ) print("-" * 100 ) if config_shortcut_name in aws_config_map: lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models ) else: lowercase__ : Union[str, Any]= config_shortcut_name if model_shortcut_name in aws_model_maps: lowercase__ : str= cached_file(A , A , force_download=not use_cached_models ) else: lowercase__ : Any= model_shortcut_name if os.path.isfile(A ): lowercase__ : Dict= "converted_model" 
convert_pt_checkpoint_to_tf( model_type=A , pytorch_checkpoint_path=A , config_file=A , tf_dump_path=os.path.join(A , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=A , ) if remove_cached_files: os.remove(A ) os.remove(A ) if __name__ == "__main__": a : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file.""" ) parser.add_argument( """--model_type""", default=None, type=str, help=( F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """ """convert all the models from AWS.""" ), ) parser.add_argument( """--pytorch_checkpoint_path""", default=None, type=str, help=( """Path to the PyTorch checkpoint path or shortcut name to download from AWS. """ """If not given, will download and convert all the checkpoints from AWS.""" ), ) parser.add_argument( """--config_file""", default=None, type=str, help=( """The config json file corresponding to the pre-trained model. \n""" """This specifies the model architecture. 
If not given and """ """--pytorch_checkpoint_path is not given or is a shortcut name """ """use the configuration associated to the shortcut name on the AWS""" ), ) parser.add_argument( """--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions.""" ) parser.add_argument( """--use_cached_models""", action="""store_true""", help="""Use cached models if possible instead of updating to latest checkpoint versions.""", ) parser.add_argument( """--remove_cached_files""", action="""store_true""", help="""Remove pytorch models after conversion (save memory when converting in batches).""", ) parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""") a : List[str] = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
85
1
"""simple docstring""" import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType a : Optional[List[str]] = None a : Dict = """<""" if sys.byteorder == """little""" else """>""" # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image a : Optional[Any] = [ np.dtype("""|b1"""), np.dtype("""|u1"""), np.dtype("""<u2"""), np.dtype(""">u2"""), np.dtype("""<i2"""), np.dtype(""">i2"""), np.dtype("""<u4"""), np.dtype(""">u4"""), np.dtype("""<i4"""), np.dtype(""">i4"""), np.dtype("""<f4"""), np.dtype(""">f4"""), np.dtype("""<f8"""), np.dtype(""">f8"""), ] @dataclass class __UpperCAmelCase: """simple docstring""" __lowerCamelCase = True __lowerCamelCase = None # Automatically constructed __lowerCamelCase = "PIL.Image.Image" __lowerCamelCase = pa.struct({"bytes": pa.binary(), "path": pa.string()} ) __lowerCamelCase = field(default="Image" , init=SCREAMING_SNAKE_CASE__ , repr=SCREAMING_SNAKE_CASE__ ) def __call__( self ): '''simple docstring''' return self.pa_type def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'." 
) if isinstance(snake_case__ , snake_case__ ): lowercase__ : Optional[Any]= np.array(snake_case__ ) if isinstance(snake_case__ , snake_case__ ): return {"path": value, "bytes": None} elif isinstance(snake_case__ , snake_case__ ): return {"path": None, "bytes": value} elif isinstance(snake_case__ , np.ndarray ): # convert the image array to PNG/TIFF bytes return encode_np_array(snake_case__ ) elif isinstance(snake_case__ , PIL.Image.Image ): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(snake_case__ ) elif value.get("path" ) is not None and os.path.isfile(value["path"] ): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get("path" )} elif value.get("bytes" ) is not None or value.get("path" ) is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get("bytes" ), "path": value.get("path" )} else: raise ValueError( F'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__=None ): '''simple docstring''' if not self.decode: raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." ) if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support decoding images, please install 'Pillow'." 
) if token_per_repo_id is None: lowercase__ : List[Any]= {} lowercase__, lowercase__ : str= value["path"], value["bytes"] if bytes_ is None: if path is None: raise ValueError(F'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' ) else: if is_local_path(snake_case__ ): lowercase__ : int= PIL.Image.open(snake_case__ ) else: lowercase__ : List[str]= path.split("::" )[-1] try: lowercase__ : List[Any]= string_to_dict(snake_case__ , config.HUB_DATASETS_URL )["repo_id"] lowercase__ : Tuple= token_per_repo_id.get(snake_case__ ) except ValueError: lowercase__ : List[Any]= None with xopen(snake_case__ , "rb" , use_auth_token=snake_case__ ) as f: lowercase__ : Optional[Any]= BytesIO(f.read() ) lowercase__ : Dict= PIL.Image.open(bytes_ ) else: lowercase__ : Any= PIL.Image.open(BytesIO(bytes_ ) ) image.load() # to avoid "Too many open files" errors return image def UpperCAmelCase_ ( self ): '''simple docstring''' from .features import Value return ( self if self.decode else { "bytes": Value("binary" ), "path": Value("string" ), } ) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' if pa.types.is_string(storage.type ): lowercase__ : Optional[int]= pa.array([None] * len(snake_case__ ) , type=pa.binary() ) lowercase__ : Optional[int]= pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): lowercase__ : Optional[Any]= pa.array([None] * len(snake_case__ ) , type=pa.string() ) lowercase__ : List[str]= pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("bytes" ) >= 0: lowercase__ : Union[str, Any]= storage.field("bytes" ) else: lowercase__ : Union[str, Any]= pa.array([None] * len(snake_case__ ) , type=pa.binary() ) if storage.type.get_field_index("path" ) >= 0: lowercase__ : str= storage.field("path" ) else: lowercase__ : int= 
pa.array([None] * len(snake_case__ ) , type=pa.string() ) lowercase__ : List[Any]= pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_list(storage.type ): lowercase__ : Optional[int]= pa.array( [encode_np_array(np.array(snake_case__ ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , ) lowercase__ : List[Any]= pa.array([None] * len(snake_case__ ) , type=pa.string() ) lowercase__ : List[Any]= pa.StructArray.from_arrays( [bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() ) return array_cast(snake_case__ , self.pa_type ) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' @no_op_if_value_is_null def path_to_bytes(snake_case__ ): with xopen(snake_case__ , "rb" ) as f: lowercase__ : Optional[int]= f.read() return bytes_ lowercase__ : List[str]= pa.array( [ (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) lowercase__ : Optional[int]= pa.array( [os.path.basename(snake_case__ ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , ) lowercase__ : str= pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() ) return array_cast(snake_case__ , self.pa_type ) def lowercase__() ->List[str]: """simple docstring""" if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'." 
) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() lowercase__ : Any= list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) ) return _IMAGE_COMPRESSION_FORMATS def lowercase__(A ) ->bytes: """simple docstring""" lowercase__ : Union[str, Any]= BytesIO() if image.format in list_image_compression_formats(): lowercase__ : Dict= image.format else: lowercase__ : List[str]= "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF" image.save(A , format=A ) return buffer.getvalue() def lowercase__(A ) ->dict: """simple docstring""" if hasattr(A , "filename" ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(A )} def lowercase__(A ) ->dict: """simple docstring""" if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'." ) lowercase__ : List[str]= array.dtype lowercase__ : Optional[int]= dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER lowercase__ : Optional[int]= dtype.kind lowercase__ : List[str]= dtype.itemsize lowercase__ : str= None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: lowercase__ : Optional[Any]= np.dtype("|u1" ) if dtype_kind not in ["u", "i"]: raise TypeError( f'''Unsupported array dtype {dtype} for image encoding. 
Only {dest_dtype} is supported for multi-channel arrays.''' ) if dtype is not dest_dtype: warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: lowercase__ : Any= dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: lowercase__ : List[Any]= dtype_byteorder + dtype_kind + str(A ) lowercase__ : Dict= np.dtype(A ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' ) lowercase__ : Tuple= PIL.Image.fromarray(array.astype(A ) ) return {"path": None, "bytes": image_to_bytes(A )} def lowercase__(A ) ->List[dict]: """simple docstring""" if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'." ) if objs: lowercase__, lowercase__ : Any= first_non_null_value(A ) if isinstance(A , A ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(A , np.ndarray ): lowercase__ : Optional[Any]= no_op_if_value_is_null(A ) return [obj_to_image_dict_func(A ) for obj in objs] elif isinstance(A , PIL.Image.Image ): lowercase__ : List[str]= no_op_if_value_is_null(A ) return [obj_to_image_dict_func(A ) for obj in objs] else: return objs else: return objs
85
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule a : List[str] = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]} if TYPE_CHECKING: from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM else: import sys a : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
85
1
"""simple docstring""" import random class __UpperCAmelCase: """simple docstring""" @staticmethod def UpperCAmelCase_ ( snake_case__ ): '''simple docstring''' lowercase__ : int= [ord(snake_case__ ) for i in text] lowercase__ : Optional[int]= [] lowercase__ : Tuple= [] for i in plain: lowercase__ : int= random.randint(1 , 300 ) lowercase__ : int= (i + k) * k cipher.append(snake_case__ ) key.append(snake_case__ ) return cipher, key @staticmethod def UpperCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' lowercase__ : List[str]= [] for i in range(len(snake_case__ ) ): lowercase__ : Tuple= int((cipher[i] - (key[i]) ** 2) / key[i] ) plain.append(chr(snake_case__ ) ) return "".join(snake_case__ ) if __name__ == "__main__": a , a : Optional[Any] = Onepad().encrypt("""Hello""") print(c, k) print(Onepad().decrypt(c, k))
85
"""simple docstring""" def lowercase__(A ) ->list: """simple docstring""" if n_term == "": return [] lowercase__ : list= [] for temp in range(int(A ) ): series.append(f'''1/{temp + 1}''' if series else "1" ) return series if __name__ == "__main__": a : Dict = input("""Enter the last number (nth term) of the Harmonic Series""") print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""") print(harmonic_series(nth_term))
85
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) a : Optional[Any] = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : List[str] = ["""EncoderDecoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Dict = ["""TFEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Optional[Any] = ["""FlaxEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys a : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
85
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : int = logging.get_logger(__name__) a : str = { """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""", """google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""", """google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""", # See all BigBird models at https://huggingface.co/models?filter=big_bird } class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = "big_bird" def __init__( self , snake_case__=50358 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu_new" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=4096 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=True , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=66 , snake_case__="block_sparse" , snake_case__=True , snake_case__=False , snake_case__=64 , snake_case__=3 , snake_case__=None , **snake_case__ , ): '''simple docstring''' super().__init__( pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , sep_token_id=snake_case__ , **snake_case__ , ) lowercase__ : Dict= vocab_size lowercase__ : Optional[int]= max_position_embeddings lowercase__ : List[Any]= hidden_size lowercase__ : List[str]= num_hidden_layers lowercase__ : List[str]= num_attention_heads lowercase__ : Optional[int]= intermediate_size lowercase__ : Optional[int]= hidden_act lowercase__ : Tuple= hidden_dropout_prob lowercase__ : int= attention_probs_dropout_prob lowercase__ : int= initializer_range lowercase__ : List[Any]= type_vocab_size lowercase__ : Union[str, Any]= layer_norm_eps lowercase__ : Optional[Any]= 
use_cache lowercase__ : Union[str, Any]= rescale_embeddings lowercase__ : Union[str, Any]= attention_type lowercase__ : Any= use_bias lowercase__ : List[Any]= block_size lowercase__ : Optional[Any]= num_random_blocks lowercase__ : Optional[int]= classifier_dropout class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" @property def UpperCAmelCase_ ( self ): '''simple docstring''' if self.task == "multiple-choice": lowercase__ : List[Any]= {0: "batch", 1: "choice", 2: "sequence"} else: lowercase__ : Tuple= {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
85
1
"""simple docstring""" def lowercase__(A , A ) ->int: """simple docstring""" return int((input_a, input_a).count(0 ) == 0 ) def lowercase__() ->None: """simple docstring""" assert and_gate(0 , 0 ) == 0 assert and_gate(0 , 1 ) == 0 assert and_gate(1 , 0 ) == 0 assert and_gate(1 , 1 ) == 1 if __name__ == "__main__": test_and_gate() print(and_gate(1, 0)) print(and_gate(0, 0)) print(and_gate(0, 1)) print(and_gate(1, 1))
85
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
85
1
"""simple docstring""" def lowercase__(A = 1_000 ) ->int: """simple docstring""" lowercase__ : Any= 2**power lowercase__ : Tuple= 0 while n: lowercase__, lowercase__ : List[str]= r + n % 10, n // 10 return r if __name__ == "__main__": print(solution(int(str(input()).strip())))
85
"""simple docstring""" from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def lowercase__(A , A ) ->List[Any]: """simple docstring""" lowercase__ : str= [] for part_id in partition_order: lowercase__ : int= df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect() for row_idx, row in enumerate(A ): expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->str: """simple docstring""" lowercase__ : Optional[Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Tuple= spark.range(100 ).repartition(1 ) lowercase__ : Dict= Spark(A ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=16 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 50 @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->Tuple: """simple docstring""" lowercase__ : Union[str, Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Dict= spark.range(10 ).repartition(2 ) lowercase__ : Optional[Any]= [1, 0] lowercase__ : List[str]= _generate_iterable_examples(A , A ) # Reverse the partitions. 
lowercase__ : int= _get_expected_row_ids_and_row_dicts_for_partition_order(A , A ) for i, (row_id, row_dict) in enumerate(generate_fn() ): lowercase__, lowercase__ : Any= expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->int: """simple docstring""" lowercase__ : int= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Dict= spark.range(10 ).repartition(1 ) lowercase__ : str= SparkExamplesIterable(A ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(A ): assert row_id == f'''0_{i}''' assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->str: """simple docstring""" lowercase__ : List[str]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : int= spark.range(30 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. 
with patch("numpy.random.Generator" ) as generator_mock: lowercase__ : Optional[Any]= lambda A : x.reverse() lowercase__ : Tuple= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [2, 1, 0] ) lowercase__ : List[str]= SparkExamplesIterable(A ).shuffle_data_sources(A ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(A ): lowercase__, lowercase__ : str= expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->Any: """simple docstring""" lowercase__ : Dict= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Union[str, Any]= spark.range(20 ).repartition(4 ) # Partitions 0 and 2 lowercase__ : Optional[int]= SparkExamplesIterable(A ).shard_data_sources(worker_id=0 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowercase__ : Union[str, Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [0, 2] ) for i, (row_id, row_dict) in enumerate(A ): lowercase__, lowercase__ : Tuple= expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 lowercase__ : Tuple= SparkExamplesIterable(A ).shard_data_sources(worker_id=1 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowercase__ : List[Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [1, 3] ) for i, (row_id, row_dict) in enumerate(A ): lowercase__, lowercase__ : Dict= expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->Tuple: """simple docstring""" lowercase__ : Any= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Tuple= spark.range(100 ).repartition(1 ) lowercase__ : Optional[int]= Spark(A ) # Choose a small max_shard_size for maximum partitioning. 
spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 100
85
1
"""simple docstring""" from sympy import diff, lambdify, symbols from sympy.functions import * # noqa: F403 def lowercase__(A , A , A = "x" , A = 10**-10 , A = 1 , ) ->complex: """simple docstring""" lowercase__ : Union[str, Any]= symbols(A ) lowercase__ : Union[str, Any]= lambdify(A , A ) lowercase__ : List[str]= lambdify(A , diff(A , A ) ) lowercase__ : List[Any]= starting_point while True: if diff_function(A ) != 0: lowercase__ : List[Any]= prev_guess - multiplicity * func(A ) / diff_function( A ) else: raise ZeroDivisionError("Could not find root" ) from None # Precision is checked by comparing the difference of consecutive guesses if abs(next_guess - prev_guess ) < precision: return next_guess lowercase__ : str= next_guess # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""") # Find root of polynomial # Find fourth Root of 5 print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""") # Find value of e print( """The root of log(y) - 1 = 0 is """, F"""{newton_raphson('log(y) - 1', 2, variable='y')}""", ) # Exponential Roots print( """The root of exp(x) - 1 = 0 is""", F"""{newton_raphson('exp(x) - 1', 10, precision=0.0_05)}""", ) # Find root of cos(x) print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
85
"""simple docstring""" import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=2 , snake_case__=99 , snake_case__=0 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=12 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__="last" , snake_case__=None , snake_case__=None , ): '''simple docstring''' lowercase__ : Optional[int]= parent lowercase__ : Tuple= batch_size lowercase__ : Tuple= seq_length lowercase__ : str= is_training lowercase__ : str= use_input_lengths lowercase__ : Any= use_token_type_ids lowercase__ : List[Any]= use_labels lowercase__ : Optional[int]= gelu_activation lowercase__ : str= sinusoidal_embeddings lowercase__ : List[str]= causal lowercase__ : Any= asm lowercase__ : Optional[int]= n_langs lowercase__ : Union[str, Any]= vocab_size lowercase__ : int= n_special lowercase__ : Any= hidden_size lowercase__ : int= num_hidden_layers lowercase__ : 
List[str]= num_attention_heads lowercase__ : List[str]= hidden_dropout_prob lowercase__ : str= attention_probs_dropout_prob lowercase__ : Any= max_position_embeddings lowercase__ : List[Any]= type_vocab_size lowercase__ : int= type_sequence_label_size lowercase__ : Any= initializer_range lowercase__ : Optional[int]= num_labels lowercase__ : Union[str, Any]= num_choices lowercase__ : List[Any]= summary_type lowercase__ : Optional[int]= use_proj lowercase__ : int= scope def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : Dict= random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : Tuple= None if self.use_input_lengths: lowercase__ : List[Any]= ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowercase__ : Tuple= None if self.use_token_type_ids: lowercase__ : Any= ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowercase__ : str= None lowercase__ : Tuple= None lowercase__ : Dict= None if self.use_labels: lowercase__ : Optional[Any]= ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : Optional[Any]= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase__ : Tuple= ids_tensor([self.batch_size] , 2 ).float() lowercase__ : Tuple= ids_tensor([self.batch_size] , self.num_choices ) lowercase__ : List[Any]= self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def UpperCAmelCase_ ( self ): '''simple docstring''' return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , 
sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : Any= FlaubertModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : str= model(snake_case__ , lengths=snake_case__ , langs=snake_case__ ) lowercase__ : str= model(snake_case__ , langs=snake_case__ ) lowercase__ : Any= model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : str= FlaubertWithLMHeadModel(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Optional[Any]= model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : str= FlaubertForQuestionAnsweringSimple(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : List[str]= model(snake_case__ ) lowercase__ : Dict= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) 
) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : List[Any]= FlaubertForQuestionAnswering(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Dict= model(snake_case__ ) lowercase__ : Any= model( snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , p_mask=snake_case__ , ) lowercase__ : List[str]= model( snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , ) ((lowercase__), ) : Optional[Any]= result_with_labels.to_tuple() lowercase__ : Union[str, Any]= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ ) ((lowercase__), ) : List[Any]= result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : List[str]= FlaubertForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Optional[Any]= model(snake_case__ ) lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.loss.shape , () ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : List[Any]= self.num_labels lowercase__ : Union[str, Any]= FlaubertForTokenClassification(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : int= model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : int= self.num_choices lowercase__ : str= FlaubertForMultipleChoice(config=snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Dict= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : int= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : str= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : Any= model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.prepare_config_and_inputs() ( ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ) : Any= config_and_inputs lowercase__ : Tuple= { "input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths, "attention_mask": input_mask, } return config, inputs_dict @require_torch class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" __lowerCamelCase = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) __lowerCamelCase = ( { "feature-extraction": FlaubertModel, "fill-mask": FlaubertWithLMHeadModel, "question-answering": FlaubertForQuestionAnsweringSimple, "text-classification": FlaubertForSequenceClassification, "token-classification": FlaubertForTokenClassification, "zero-shot": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' lowercase__ : Tuple= super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": lowercase__ : List[Any]= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) lowercase__ : List[str]= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) return inputs_dict def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= FlaubertModelTester(self ) lowercase__ : List[str]= ConfigTester(self , config_class=snake_case__ , emb_dim=37 ) def UpperCAmelCase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ ) def UpperCAmelCase_ ( self ): 
'''simple docstring''' lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*snake_case__ ) @slow def UpperCAmelCase_ ( self ): '''simple docstring''' for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : List[str]= FlaubertModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) @slow @require_torch_gpu def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__, lowercase__ : Optional[Any]= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. if model_class == FlaubertForMultipleChoice: return lowercase__ : int= True lowercase__ : List[Any]= model_class(config=snake_case__ ) lowercase__ : str= self._prepare_for_class(snake_case__ , snake_case__ ) lowercase__ : Dict= torch.jit.trace( snake_case__ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(snake_case__ , os.path.join(snake_case__ , "traced_model.pt" ) ) lowercase__ : str= torch.jit.load(os.path.join(snake_case__ , "traced_model.pt" ) , map_location=snake_case__ ) loaded(inputs_dict["input_ids"].to(snake_case__ ) , inputs_dict["attention_mask"].to(snake_case__ ) ) @require_torch class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" ) lowercase__ : Tuple= torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) with torch.no_grad(): lowercase__ : Optional[int]= model(snake_case__ )[0] lowercase__ : Optional[int]= torch.Size((1, 11, 768) 
) self.assertEqual(output.shape , snake_case__ ) lowercase__ : Dict= torch.tensor( [[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
85
1
"""Check that every ``__init__`` parameter of each configuration class is actually used
in the corresponding modeling files."""
import inspect
import os
import re

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Attributes that are legitimately unused in the modeling files.  A value of `True`
# whitelists the whole config class; a list whitelists only those attribute names.
SPECIAL_CASES_TO_ALLOW = {
    # used to compute the property `self.chunk_length`
    "EncodecConfig": ["overlap"],
    # used as `self.bert_model = BertModel(config, ...)`
    "DPRConfig": True,
    # not used in modeling files, but it's an important information
    "FSMTConfig": ["langs"],
    # used internally in the configuration class file
    "GPTNeoConfig": ["attention_types"],
    # used internally in the configuration class file
    "EsmConfig": ["is_folding_model"],
    # used during training (despite we don't have training script for these models yet)
    "Mask2FormerConfig": ["ignore_value"],
    # `ignore_value` used during training (despite we don't have training script for these models yet)
    # `norm` used in conversion script (despite not using in the modeling file)
    "OneFormerConfig": ["ignore_value", "norm"],
    # used during preprocessing and collation, see `collating_graphormer.py`
    "GraphormerConfig": ["spatial_pos_max"],
    # used internally in the configuration class file
    "T5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    # `tokenizer_class` get default value `T5Tokenizer` intentionally
    "MT5Config": ["feed_forward_proj", "tokenizer_class"],
    "UMT5Config": ["feed_forward_proj", "tokenizer_class"],
    # used internally in the configuration class file
    "LongT5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    "SwitchTransformersConfig": ["feed_forward_proj"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "BioGptConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "GLPNConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "SegformerConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "CvtConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "PerceiverConfig": ["layer_norm_eps"],
    # used internally to calculate the feature size
    "InformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "AutoformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate `mlp_dim`
    "SamVisionConfig": ["mlp_ratio"],
    # For (head) training, but so far not implemented
    "ClapAudioConfig": ["num_classes"],
    # Not used, but providing useful information to users
    "SpeechT5HifiGanConfig": ["sampling_rate"],
}

# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
    {
        "CLIPSegConfig": True,
        "DeformableDetrConfig": True,
        "DetaConfig": True,
        "DinatConfig": True,
        "DonutSwinConfig": True,
        "EfficientFormerConfig": True,
        "FSMTConfig": True,
        "JukeboxConfig": True,
        "LayoutLMv2Config": True,
        "MaskFormerSwinConfig": True,
        "MT5Config": True,
        "NatConfig": True,
        "OneFormerConfig": True,
        "PerceiverConfig": True,
        "RagConfig": True,
        "SpeechT5Config": True,
        "SwinConfig": True,
        "Swin2SRConfig": True,
        "Swinv2Config": True,
        "SwitchTransformersConfig": True,
        "TableTransformerConfig": True,
        "TapasConfig": True,
        "TransfoXLConfig": True,
        "UniSpeechConfig": True,
        "UniSpeechSatConfig": True,
        "WavLMConfig": True,
        "WhisperConfig": True,
        # TODO: @Arthur (for `alignment_head` and `alignment_layer`)
        "JukeboxPriorConfig": True,
        # TODO: @Younes (for `is_decoder`)
        "Pix2StructTextConfig": True,
    }
)


def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Return ``True`` if any name in ``attributes`` is used (or explicitly allowed)
    in the given modeling source strings.

    Args:
        config_class: the configuration class the attribute belongs to.
        attributes: all variant names of the attribute (original + attribute_map alias).
        default_value: the attribute's default value in the config ``__init__``.
        source_strings: contents of the ``modeling_*`` files of the model.
    """
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True
            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed


def check_config_attributes_being_used(config_class):
    """Return the sorted list of ``__init__`` parameters of ``config_class`` that are
    not used anywhere in the model's ``modeling_*`` files."""
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)


def check_config_attributes():
    """Raise ``ValueError`` listing every configuration class with unused attributes."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)


if __name__ == "__main__":
    check_config_attributes()
85
"""Stochastic scheduler from Karras et al. (2022) for variance-expanding models."""
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
class KarrasVeOutput(BaseOutput):
    """
    Output of the scheduler's step functions.

    Attributes:
        prev_sample: computed sample ``x_{t-1}`` for the previous timestep.
        derivative: derivative of the predicted original sample ``x_0``.
        pred_original_sample: predicted denoised sample ``x_0`` based on the
            current model output; useful for progress preview or guidance.
    """

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """
    Stochastic sampling scheduler (Algorithm 2 of Karras et al. 2022) tailored to
    variance-expanding models.

    Args:
        sigma_min: minimum noise magnitude of the schedule.
        sigma_max: maximum noise magnitude of the schedule.
        s_noise: amount of additional noise to counteract loss of detail during sampling.
        s_churn: parameter controlling the overall amount of stochasticity.
        s_min: lower bound of the sigma range in which churn noise is added.
        s_max: upper bound of the sigma range in which churn noise is added.
    """

    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values, populated by `set_timesteps`
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """No-op; present so all schedulers share the same calling interface."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """Set the discrete timesteps and the geometric sigma schedule used for inference.

        Args:
            num_inference_steps: number of diffusion steps used when generating samples.
            device: device to move the timesteps/schedule tensors to.
        """
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        # Geometric interpolation between sigma_max and sigma_min over the timesteps.
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        """Explicit Langevin-like "churn": add noise to move sigma to a higher value
        ``sigma_hat`` when ``s_min <= sigma <= s_max``.

        Returns:
            The noised sample and the increased noise level ``sigma_hat``.
        """
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        """Propagate the sample one step with the first-order (Euler) prediction.

        Args:
            model_output: direct output from the learned diffusion model.
            sigma_hat: churned noise level for the current step.
            sigma_prev: noise level of the previous step.
            sample_hat: churned sample for the current step.
            return_dict: whether to return a `KarrasVeOutput` instead of a tuple.
        """
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        """Second-order (Heun) correction of the network's prediction at the
        previous noise level."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        # Training-time noising is not supported by this scheduler.
        raise NotImplementedError()
85
1
"""simple docstring""" from math import sqrt def lowercase__(A = 1_000_000 ) ->int: """simple docstring""" lowercase__ : int= 0 lowercase__ : int= 0 lowercase__ : int while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(A , sum_shortest_sides // 2 ) - max(1 , sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(F"""{solution() = }""")
85
"""simple docstring""" from ....utils import logging a : List[str] = logging.get_logger(__name__) class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__( self , snake_case__ , snake_case__=None , snake_case__=2048 ): '''simple docstring''' lowercase__ : Dict= config.__dict__ lowercase__ : str= modal_hidden_size if num_labels: lowercase__ : List[str]= num_labels
85
1
"""simple docstring""" # Algorithm for the pigeonhole sorting def lowercase__(A ) ->Dict: """simple docstring""" lowercase__ : Union[str, Any]= min(A ) # min() finds the minimum value lowercase__ : Optional[int]= max(A ) # max() finds the maximum value lowercase__ : Any= max_val - min_val + 1 # size is difference of max and min values plus one # list of pigeonholes of size equal to the variable size lowercase__ : Tuple= [0] * size # Populate the pigeonholes. for x in a: assert isinstance(A , A ), "integers only please" holes[x - min_val] += 1 # Putting the elements back into the array in an order. lowercase__ : List[str]= 0 for count in range(A ): while holes[count] > 0: holes[count] -= 1 lowercase__ : List[str]= count + min_val i += 1 def lowercase__() ->str: """simple docstring""" lowercase__ : List[str]= [8, 3, 2, 7, 4, 6, 8] pigeonhole_sort(A ) print("Sorted order is:" , " ".join(A ) ) if __name__ == "__main__": main()
85
"""Convert original CvT checkpoints to the Hugging Face Transformers format."""
import argparse
import json
from collections import OrderedDict

import torch
from huggingface_hub import cached_download, hf_hub_url

from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification


def embeddings(idx):
    """Key renames (HF key, original key) for the patch embedding of stage `idx`."""
    src = f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings"
    dst = f"stage{idx}.patch_embed"
    return [
        (f"{src}.projection.weight", f"{dst}.proj.weight"),
        (f"{src}.projection.bias", f"{dst}.proj.bias"),
        (f"{src}.normalization.weight", f"{dst}.norm.weight"),
        (f"{src}.normalization.bias", f"{dst}.norm.bias"),
    ]


def attention(idx, cnt):
    """Key renames (HF key, original key) for attention block `cnt` of stage `idx`.

    The repetitive q/k/v convolutional-projection and linear-projection renames
    are generated by loops; the emitted order matches the original hand-written
    list exactly.
    """
    attention_weights = []
    prefix = f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention"
    target = f"stage{idx}.blocks.{cnt}.attn"

    # Depthwise-conv projections for query/key/value: conv weight + batch-norm stats.
    for proj in ("query", "key", "value"):
        short = proj[0]  # q / k / v
        attention_weights.append(
            (
                f"{prefix}.convolution_projection_{proj}.convolution_projection.convolution.weight",
                f"{target}.conv_proj_{short}.conv.weight",
            )
        )
        for stat in ("weight", "bias", "running_mean", "running_var", "num_batches_tracked"):
            attention_weights.append(
                (
                    f"{prefix}.convolution_projection_{proj}.convolution_projection.normalization.{stat}",
                    f"{target}.conv_proj_{short}.bn.{stat}",
                )
            )

    # Linear projections for query/key/value.
    for proj in ("query", "key", "value"):
        short = proj[0]
        for param in ("weight", "bias"):
            attention_weights.append(
                (f"{prefix}.projection_{proj}.{param}", f"{target}.proj_{short}.{param}")
            )

    # Attention output projection, MLP and layernorms.
    layer = f"cvt.encoder.stages.{idx}.layers.{cnt}"
    block = f"stage{idx}.blocks.{cnt}"
    for src, dst in (
        (f"{layer}.attention.output.dense", f"{block}.attn.proj"),
        (f"{layer}.intermediate.dense", f"{block}.mlp.fc1"),
        (f"{layer}.output.dense", f"{block}.mlp.fc2"),
        (f"{layer}.layernorm_before", f"{block}.norm1"),
        (f"{layer}.layernorm_after", f"{block}.norm2"),
    ):
        attention_weights.append((f"{src}.weight", f"{dst}.weight"))
        attention_weights.append((f"{src}.bias", f"{dst}.bias"))

    return attention_weights


def cls_token(idx):
    """Key rename for the cls token; only the last stage of the original checkpoints has one."""
    return [(f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token")]


def final():
    """Key renames for the final layernorm and the classification head."""
    return [
        ("layernorm.weight", "norm.weight"),
        ("layernorm.bias", "norm.bias"),
        ("classifier.weight", "head.weight"),
        ("classifier.bias", "head.bias"),
    ]


def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    """Convert an original CvT checkpoint file to a HF `CvtForImageClassification`
    model + image processor and save both to `pytorch_dump_folder`."""
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for hf_key, orig_key in list_of_state_dict:
        huggingface_weights[hf_key] = original_weights[orig_key]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)


# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Input Image Size",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
85
1
"""Lazy-import structure for the FNet model (config, tokenizers, PyTorch modeling)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}

# Slow tokenizer requires sentencepiece.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]

# Fast tokenizer requires the `tokenizers` library.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]

# Modeling code requires PyTorch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet import FNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet_fast import FNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_fnet import (
            FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FNetForMaskedLM,
            FNetForMultipleChoice,
            FNetForNextSentencePrediction,
            FNetForPreTraining,
            FNetForQuestionAnswering,
            FNetForSequenceClassification,
            FNetForTokenClassification,
            FNetLayer,
            FNetModel,
            FNetPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
85
"""Unconditional image generation pipeline using the variance-exploding SDE sampler."""
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    """
    Pipeline for unconditional image generation with the VE-SDE sampler.

    Parameters:
        unet: U-Net architecture used to denoise the encoded image.
        scheduler: `ScoreSdeVeScheduler` used in combination with `unet`.
    """

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Generate `batch_size` images.

        Args:
            batch_size: number of images to generate.
            num_inference_steps: number of denoising steps.
            generator: torch generator(s) for deterministic sampling.
            output_type: "pil" for PIL images, anything else for a numpy array.
            return_dict: whether to return an `ImagePipelineOutput` instead of a tuple.
        """
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # Start from pure noise scaled to the scheduler's initial sigma.
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step (Langevin dynamics)
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        # Use the noise-free mean of the final step as the output image.
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
85
1
"""simple docstring""" def lowercase__(A ) ->bool: """simple docstring""" lowercase__ : Tuple= (1 + 24 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def lowercase__(A = 5_000 ) ->int: """simple docstring""" lowercase__ : str= [(i * (3 * i - 1)) // 2 for i in range(1 , A )] for i, pentagonal_i in enumerate(A ): for j in range(A , len(A ) ): lowercase__ : List[Any]= pentagonal_nums[j] lowercase__ : int= pentagonal_i + pentagonal_j lowercase__ : Optional[int]= pentagonal_j - pentagonal_i if is_pentagonal(A ) and is_pentagonal(A ): return b return -1 if __name__ == "__main__": print(F"""{solution() = }""")
85
"""simple docstring""" def lowercase__(A ) ->list[int]: """simple docstring""" lowercase__ : List[str]= len(A ) for i in range(A ): for j in range(i + 1 , A ): if numbers[j] < numbers[i]: lowercase__, lowercase__ : List[str]= numbers[j], numbers[i] return numbers if __name__ == "__main__": a : Dict = input("""Enter numbers separated by a comma:\n""").strip() a : List[str] = [int(item) for item in user_input.split(""",""")] print(exchange_sort(unsorted))
85
1
"""Swin Transformer model configuration."""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    """
    Configuration class to instantiate a Swin model.

    Args:
        image_size: size (resolution) of each input image.
        patch_size: size (resolution) of each patch.
        num_channels: number of input channels.
        embed_dim: dimensionality of the patch embedding.
        depths: number of layers in each encoder stage.
        num_heads: number of attention heads in each stage.
        window_size: size of the attention window.
        mlp_ratio: ratio of MLP hidden dimensionality to embedding dimensionality.
        qkv_bias: whether a learnable bias is added to queries, keys and values.
        hidden_dropout_prob: dropout probability for embeddings and encoder.
        attention_probs_dropout_prob: dropout ratio for attention probabilities.
        drop_path_rate: stochastic depth rate.
        hidden_act: non-linear activation in the encoder.
        use_absolute_embeddings: whether to add absolute position embeddings.
        initializer_range: std of the truncated-normal weight initializer.
        layer_norm_eps: epsilon used by the layer-normalization layers.
        encoder_stride: factor to increase spatial resolution in the decoder head
            for masked image modeling.
        out_features: stage names to output when used as a backbone.
        out_indices: stage indices to output when used as a backbone.
    """

    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    """ONNX export configuration for Swin."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
85
"""Project Euler problem 10: sum of all primes below two million.

Fix: the obfuscated source defined every function under the same name
(`lowercase__`) while the call sites still referenced `is_prime` and
`solution`, so the module raised NameError. Real identifiers restored.
"""
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    """Return True if *number* is prime (deterministic 6k +/- 1 trial division)."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Yield the primes 2, 3, 5, 7, ... indefinitely."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes strictly below *n*."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
85
1
"""Image processor for Pix2Struct (de-obfuscated).

NOTE(review): identifiers in this chunk were machine-mangled (duplicate parameter
names are a SyntaxError). Names restored from the surviving call sites
(`torch_extract_patches`, `_check_torch_version`, `render_text`, `render_header`)
and attribute references.
"""
import io
import math
from typing import Dict, Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    get_image_size,
    infer_channel_dimension_format,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends


if is_vision_available():
    import textwrap

    from PIL import Image, ImageDraw, ImageFont

if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False


logger = logging.get_logger(__name__)
DEFAULT_FONT_PATH = "ybelkada/fonts"


def _check_torch_version():
    """Raise ImportError when torch is installed but older than 1.11."""
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )


def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Extract non-overlapping (patch_height, patch_width) patches from a CHW tensor.

    Returns a tensor of shape
    [1, rows, columns, patch_height * patch_width * channels].
    """
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(
        image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width)
    )
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)


def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
) -> "Image.Image":
    """Render *text* onto a new PIL image, wrapping lines at 80 characters."""
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        # Fall back to the Arial font hosted on the hub.
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image


def render_header(image: np.ndarray, header: str, **kwargs):
    """Render *header* as text and paste it above *image*; return a numpy array."""
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image


class Pix2StructImageProcessor(BaseImageProcessor):
    """Constructs a Pix2Struct image processor.

    Images are resized so that a whole number of (patch_height, patch_width)
    patches fits within ``max_patches``, then flattened into
    [max_patches, 2 + patch_h * patch_w * channels] rows where the first two
    columns are 1-based row/column ids.
    """

    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa

    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        """Resize *image* to fit ``max_patches`` patches and return the padded,
        flattened patch matrix as a numpy array."""
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t. the scaled grid still fits in max_patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result

    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Normalize with the image's own mean and (floored) std, per Pix2Struct."""
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        # Floor the std so constant images do not divide by ~0.
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        """Preprocess one or several images into flattened patches + attention mask.

        Raises ValueError for invalid image types, for an explicit ``data_format``
        kwarg (the output layout is fixed), or when ``is_vqa`` is set without a
        ``header_text``.
        """
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
85
"""Project Euler problem 44: pentagonal pair with pentagonal sum and difference.

Fix: both functions were obfuscated to the same name (`lowercase__`) while the
call sites referenced `is_pentagonal` and `solution`, so the module raised
NameError. Real identifiers restored.
"""


def is_pentagonal(n: int) -> bool:
    """Return True if *n* is a pentagonal number P(k) = k(3k - 1) / 2.

    n is pentagonal iff (1 + sqrt(1 + 24n)) / 6 is a positive integer.
    """
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5_000) -> int:
    """Search the first *limit* pentagonal numbers for the first pair whose sum
    and difference are both pentagonal; return that difference, or -1 if no such
    pair exists below the limit."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
85
1
"""Lazy-import shim for the Wav2Vec2-with-LM processor.

Fix: the import structure dict was bound to an obfuscated name (`a`) while
`_LazyModule` was called with the undefined name `_import_structure`, and the
lazy module instance was never installed in ``sys.modules``. Restored to the
standard Transformers lazy-module boilerplate.
"""
from typing import TYPE_CHECKING

from ...utils import _LazyModule


# Submodule name -> public symbols exported from it (resolved lazily).
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
85
"""Pix2Struct model configuration (de-obfuscated).

NOTE(review): identifiers were machine-mangled; the composite config constructed
`PixaStructTextConfig(...)` while every class was declared `__UpperCAmelCase`,
so nothing resolved. Names restored from the surviving references and the
`model_type` string constants.
"""
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class Pix2StructTextConfig(PretrainedConfig):
    """Configuration of the Pix2Struct text (decoder) model."""

    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load this config from a checkpoint, unwrapping a composite Pix2StructConfig."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructVisionConfig(PretrainedConfig):
    """Configuration of the Pix2Struct vision (encoder) model."""

    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load this config from a checkpoint, unwrapping a composite Pix2StructConfig."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructConfig(PretrainedConfig):
    """Composite configuration wrapping a text and a vision sub-config."""

    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        # Mirror the decoder's special token ids on the composite config.
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        # Propagate the top-level initializer range to both sub-configs.
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        """Build a composite config from already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding both sub-configs into plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
85
1
"""Project Euler problem 42: count triangle words in words.txt.

Fix: the obfuscated source bound the triangular-number table to `a` and the
function to `lowercase__`, while the code referenced `TRIANGULAR_NUMBERS` and
`solution`, and called `os.path.realpath(A)` with an undefined name. Restored.
"""
import os

# Precomputes a list of the 100 first triangular numbers T(n) = n(n + 1) / 2.
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    """Return how many words in ``words.txt`` (next to this script) are triangle words.

    A word's value is the sum of the alphabetical positions of its letters
    (A=1, ..., Z=26); it is a triangle word when that value is triangular.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    wordfile_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(wordfile_path) as f:
        words = f.readline()

    # File format: a single line of comma-separated, double-quoted words.
    word_list = [word.strip("\"") for word in words.strip("\r\n").split(",")]
    triangular = set(TRIANGULAR_NUMBERS)  # O(1) membership in the hot comprehension
    word_values = [sum(ord(ch) - 64 for ch in word) for word in word_list]
    return len([value for value in word_values if value in triangular])


if __name__ == "__main__":
    print(solution())
85
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : str= TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" ) lowercase__ : str= AutoTokenizer.from_pretrained("google/mt5-small" ) lowercase__ : Tuple= tokenizer("Hello there" , return_tensors="tf" ).input_ids lowercase__ : Optional[Any]= tokenizer("Hi I am" , return_tensors="tf" ).input_ids lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ ).loss lowercase__ : int= -tf.math.reduce_mean(snake_case__ ).numpy() lowercase__ : int= -21.22_81_68 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
85
1
"""Convert BertAbs checkpoints from the authors' repository to the HF format.

Fix: the conversion function was obfuscated to `lowercase__` while the
`__main__` block called `convert_bertabs_checkpoints`, and the torch.load
map_location lambda had two parameters both named `A` (a SyntaxError).
Identifiers restored from the surviving call sites.
"""
import argparse
import logging
from collections import namedtuple

import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer  # The authors' implementation

from transformers import BertTokenizer


logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"

BertAbsConfig = namedtuple(
    "BertAbsConfig",
    [
        "temp_dir",
        "large",
        "use_bert_emb",
        "finetune_bert",
        "encoder",
        "share_emb",
        "max_pos",
        "enc_layers",
        "enc_hidden_size",
        "enc_heads",
        "enc_ff_size",
        "enc_dropout",
        "dec_layers",
        "dec_hidden_size",
        "dec_heads",
        "dec_ff_size",
        "dec_dropout",
    ],
)


def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Load the authors' BertAbs checkpoint, copy its weights into the HF
    architecture, verify both stacks produce identical outputs, and save the
    converted ``state_dict``.

    NOTE(review): ``dump_path`` is accepted but the original script saved to a
    hard-coded path; behavior preserved — confirm before changing.
    """
    # Instantiate the authors' model with the pre-trained weights.
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2_048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ----------------------------------
    # Make sure the outpus are identical
    # ----------------------------------

    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = None
    clss = None
    mask_src = None
    mask_tgt = None
    mask_cls = None

    # The original model does not apply the geneator layer immediatly but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(src, tgt, segs, mask_src, mask_tgt)[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--bertabs_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_bertabs_checkpoints(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
85
"""Processor class for BridgeTower (de-obfuscated).

NOTE(review): the obfuscated ``__call__`` had 16 parameters all named
``snake_case__`` (a SyntaxError). Parameter names restored from the keyword
arguments the method forwards to the tokenizer.
"""
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BridgeTowerProcessor(ProcessorMixin):
    """Wraps a BridgeTower image processor and a RoBERTa tokenizer into a single
    processor that produces both text encodings and pixel features."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Tokenize *text* and process *images*, merging both into one encoding."""
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Merge tokenizer and image-processor input names, preserving order
        # and dropping duplicates.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
85
1
"""simple docstring""" import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, 
load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() a : Optional[Any] = { """bart""": ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), """bert""": ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-uncased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-cased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-base-cased-finetuned-mrpc""": ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """dpr""": ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), """gpt2""": ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlnet""": ( XLNetConfig, 
TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm""": ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm-roberta""": ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """transfo-xl""": ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """openai-gpt""": ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """roberta""": ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """layoutlm""": ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), """roberta-large-mnli""": ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """camembert""": ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """flaubert""": ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert""": ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert-base-distilled-squad""": ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert""": ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert-visual-feature-encoder""": ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """ctrl""": ( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """albert""": ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, 
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """t5""": ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """electra""": ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """wav2vec2""": ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def lowercase__(A , A , A , A , A=False , A=True ) ->Union[str, Any]: """simple docstring""" if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase__, lowercase__, lowercase__, lowercase__ : List[Any]= MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models ) lowercase__ : List[Any]= config_class.from_json_file(A ) lowercase__ : Any= True lowercase__ : List[str]= True print(f'''Building TensorFlow model from configuration: {config}''' ) lowercase__ : Optional[int]= model_class(A ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): lowercase__ : List[str]= cached_file( A , A , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: lowercase__ : Union[str, Any]= load_pytorch_checkpoint_in_tfa_model(A , A ) if compare_with_pt_model: lowercase__ : Any= tf_model(tf_model.dummy_inputs , training=A ) # build the network lowercase__ : Optional[Any]= torch.load(A , map_location="cpu" ) lowercase__ : Union[str, Any]= pt_model_class.from_pretrained( pretrained_model_name_or_path=A , config=A , state_dict=A ) with torch.no_grad(): lowercase__ : str= pt_model(**pt_model.dummy_inputs ) lowercase__ : Tuple= pto[0].numpy() lowercase__ : List[Any]= tfo[0].numpy() lowercase__ : Any= np.amax(np.abs(np_pt - np_tf ) ) print(f'''Max absolute difference between models outputs {diff}''' ) assert diff <= 2e-2, f'''Error, model absolute difference is 
>2e-2: {diff}''' # Save pytorch-model print(f'''Save TensorFlow model to {tf_dump_path}''' ) tf_model.save_weights(A , save_format="h5" ) def lowercase__(A , A , A=None , A=None , A=False , A=False , A=False , A=False , ) ->List[Any]: """simple docstring""" if args_model_type is None: lowercase__ : Tuple= list(MODEL_CLASSES.keys() ) else: lowercase__ : Optional[int]= [args_model_type] for j, model_type in enumerate(A , start=1 ): print("=" * 100 ) print(f''' Converting model type {j}/{len(A )}: {model_type}''' ) print("=" * 100 ) if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ : Optional[int]= MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: lowercase__ : int= list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: lowercase__ : Any= model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(A , A ) , start=1 ): print("-" * 100 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' ) continue lowercase__ : Any= model_shortcut_name elif only_convert_finetuned_models: print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''' ) continue print( f''' Converting checkpoint {i}/{len(A )}: {model_shortcut_name} - model_type {model_type}''' ) print("-" * 100 ) if config_shortcut_name in aws_config_map: lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models ) else: lowercase__ : Union[str, Any]= config_shortcut_name if model_shortcut_name in aws_model_maps: lowercase__ : str= cached_file(A , A , force_download=not use_cached_models ) else: lowercase__ : Any= model_shortcut_name if os.path.isfile(A ): lowercase__ : Dict= "converted_model" 
convert_pt_checkpoint_to_tf( model_type=A , pytorch_checkpoint_path=A , config_file=A , tf_dump_path=os.path.join(A , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=A , ) if remove_cached_files: os.remove(A ) os.remove(A ) if __name__ == "__main__": a : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file.""" ) parser.add_argument( """--model_type""", default=None, type=str, help=( F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """ """convert all the models from AWS.""" ), ) parser.add_argument( """--pytorch_checkpoint_path""", default=None, type=str, help=( """Path to the PyTorch checkpoint path or shortcut name to download from AWS. """ """If not given, will download and convert all the checkpoints from AWS.""" ), ) parser.add_argument( """--config_file""", default=None, type=str, help=( """The config json file corresponding to the pre-trained model. \n""" """This specifies the model architecture. 
If not given and """ """--pytorch_checkpoint_path is not given or is a shortcut name """ """use the configuration associated to the shortcut name on the AWS""" ), ) parser.add_argument( """--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions.""" ) parser.add_argument( """--use_cached_models""", action="""store_true""", help="""Use cached models if possible instead of updating to latest checkpoint versions.""", ) parser.add_argument( """--remove_cached_files""", action="""store_true""", help="""Remove pytorch models after conversion (save memory when converting in batches).""", ) parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""") a : List[str] = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
85
"""simple docstring""" import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= tempfile.mkdtemp() lowercase__ : Optional[Any]= 8 # DPR tok lowercase__ : Tuple= [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] lowercase__ : Any= os.path.join(self.tmpdirname , "dpr_tokenizer" ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) lowercase__ : Any= os.path.join(snake_case__ , DPR_VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) # BART tok lowercase__ : List[Any]= [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] lowercase__ : Tuple= 
dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) lowercase__ : Any= ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] lowercase__ : Tuple= {"unk_token": "<unk>"} lowercase__ : int= os.path.join(self.tmpdirname , "bart_tokenizer" ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) lowercase__ : List[str]= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["vocab_file"] ) lowercase__ : str= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(snake_case__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(snake_case__ ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= self.get_dummy_dataset() lowercase__ : Optional[Any]= RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset: lowercase__ : Tuple= dataset 
lowercase__ : Optional[int]= RagRetriever( snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__ : Dict= self.get_dummy_dataset() lowercase__ : Tuple= RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , ) if from_disk: lowercase__ : Tuple= os.path.join(self.tmpdirname , "dataset" ) lowercase__ : Optional[Any]= os.path.join(self.tmpdirname , "index.faiss" ) dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) ) dataset.drop_index("embeddings" ) dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) ) del dataset lowercase__ : List[Any]= RagRetriever( snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: lowercase__ : Optional[int]= RagRetriever( snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , snake_case__ ) , ) return retriever def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT ) lowercase__ : Optional[int]= os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" ) dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" ) pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) ) lowercase__ : int= os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" ) lowercase__ : str= {sample["id"]: 
[sample["text"], sample["title"]] for sample in dataset} pickle.dump(snake_case__ , open(snake_case__ , "wb" ) ) lowercase__ : List[Any]= RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , ) lowercase__ : Optional[Any]= RagRetriever( snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= 1 lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever() lowercase__ : Union[str, Any]= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , n_docs=snake_case__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset: lowercase__ : Tuple= self.get_dummy_dataset() retriever.save_pretrained(snake_case__ ) lowercase__ : int= RagRetriever.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) lowercase__ : Any= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , 
dtype=np.floataa ) lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 ) self.assertTrue(out is not None ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[Any]= 1 lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) lowercase__ : Union[str, Any]= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__, lowercase__, lowercase__ : Any= retriever.retrieve(snake_case__ , n_docs=snake_case__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(snake_case__ ) lowercase__ : int= RagRetriever.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) lowercase__ : Tuple= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : str= retriever.retrieve(snake_case__ , n_docs=1 ) self.assertTrue(out is not None ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= 1 lowercase__ : str= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) lowercase__ : List[str]= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , 
n_docs=snake_case__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(snake_case__ ) lowercase__ : Optional[Any]= RagRetriever.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) lowercase__ : int= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : Union[str, Any]= retriever.retrieve(snake_case__ , n_docs=1 ) self.assertTrue(out is not None ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= 1 lowercase__ : int= self.get_dummy_legacy_index_retriever() lowercase__ : Optional[Any]= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__, lowercase__, lowercase__ : Optional[Any]= retriever.retrieve(snake_case__ , n_docs=snake_case__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] ) self.assertEqual(len(doc_dicts[0]["text"] ) , snake_case__ ) self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , 
[[1], [0]] ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(snake_case__ ) lowercase__ : List[Any]= RagRetriever.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) lowercase__ : str= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def UpperCAmelCase_ ( self ): '''simple docstring''' import torch lowercase__ : str= 1 lowercase__ : Union[str, Any]= self.get_dummy_canonical_hf_index_retriever() lowercase__ : str= [[5, 7], [10, 11]] lowercase__ : List[str]= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : Dict= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ ) lowercase__, lowercase__, lowercase__ : Optional[int]= ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(snake_case__ , snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) self.assertIsInstance(snake_case__ , np.ndarray ) lowercase__ : Any= retriever( snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ , return_tensors="pt" , ) lowercase__, lowercase__, lowercase__, lowercase__ : Tuple= ( # noqa: F841 out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], out["doc_ids"], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(snake_case__ , torch.Tensor ) self.assertIsInstance(snake_case__ , torch.Tensor 
) self.assertIsInstance(snake_case__ , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= self.get_dpr_ctx_encoder_tokenizer() lowercase__ : Dict= 1 lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) retriever.set_ctx_encoder_tokenizer(snake_case__ ) lowercase__ : List[str]= [[5, 7], [10, 11]] lowercase__ : Any= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : List[Any]= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ ) self.assertEqual( len(snake_case__ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , snake_case__ ) # check for doc token related keys in dictionary.
85
1
"""simple docstring""" import functools from typing import Any def lowercase__(A , A ) ->bool: """simple docstring""" if not isinstance(A , A ) or len(A ) == 0: raise ValueError("the string should be not empty string" ) if not isinstance(A , A ) or not all( isinstance(A , A ) and len(A ) > 0 for item in words ): raise ValueError("the words should be a list of non-empty strings" ) # Build trie lowercase__ : dict[str, Any]= {} lowercase__ : int= "WORD_KEEPER" for word in words: lowercase__ : Union[str, Any]= trie for c in word: if c not in trie_node: lowercase__ : Optional[Any]= {} lowercase__ : str= trie_node[c] lowercase__ : List[str]= True lowercase__ : Union[str, Any]= len(A ) # Dynamic programming method @functools.cache def is_breakable(A ) -> bool: if index == len_string: return True lowercase__ : Optional[int]= trie for i in range(A , A ): lowercase__ : Any= trie_node.get(string[i] , A ) if trie_node is None: return False if trie_node.get(A , A ) and is_breakable(i + 1 ): return True return False return is_breakable(0 ) if __name__ == "__main__": import doctest doctest.testmod()
85
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = ["image_processor", "tokenizer"] __lowerCamelCase = "AutoImageProcessor" __lowerCamelCase = "AutoTokenizer" def __init__( self , snake_case__ , snake_case__ ): '''simple docstring''' super().__init__(snake_case__ , snake_case__ ) lowercase__ : List[Any]= self.image_processor def __call__( self , snake_case__=None , snake_case__=None , snake_case__=None , **snake_case__ ): '''simple docstring''' if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none." ) if text is not None: lowercase__ : Tuple= self.tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ ) if images is not None: lowercase__ : str= self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ ) if text is not None and images is not None: lowercase__ : Any= image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ ) def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ ) def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' return self.tokenizer.decode(*snake_case__ , **snake_case__ ) @property def UpperCAmelCase_ ( self ): '''simple docstring''' return ["input_ids", "attention_mask", "pixel_values"]
85
1
"""simple docstring""" def lowercase__(A ) ->int: """simple docstring""" if divisor % 5 == 0 or divisor % 2 == 0: return 0 lowercase__ : List[str]= 1 lowercase__ : Optional[Any]= 1 while repunit: lowercase__ : Dict= (10 * repunit + 1) % divisor repunit_index += 1 return repunit_index def lowercase__(A = 1_000_000 ) ->int: """simple docstring""" lowercase__ : Any= limit - 1 if divisor % 2 == 0: divisor += 1 while least_divisible_repunit(A ) <= limit: divisor += 2 return divisor if __name__ == "__main__": print(F"""{solution() = }""")
85
"""simple docstring""" a : List[Any] = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/""" def lowercase__(A ) ->bytes: """simple docstring""" if not isinstance(A , A ): lowercase__ : Union[str, Any]= f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(A ) lowercase__ : str= "".join(bin(A )[2:].zfill(8 ) for byte in data ) lowercase__ : Tuple= len(A ) % 6 != 0 if padding_needed: # The padding that will be added later lowercase__ : Union[str, Any]= b"=" * ((6 - len(A ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(A ) % 6) else: lowercase__ : str= b"" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(A ) , 6 ) ).encode() + padding ) def lowercase__(A ) ->bytes: """simple docstring""" if not isinstance(A , A ) and not isinstance(A , A ): lowercase__ : str= ( "argument should be a bytes-like object or ASCII string, " f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(A ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(A , A ): try: lowercase__ : Optional[Any]= encoded_data.decode("utf-8" ) except UnicodeDecodeError: raise ValueError("base64 encoded data should only contain ASCII characters" ) lowercase__ : List[Any]= encoded_data.count("=" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." 
# Check the padding assert len(A ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one lowercase__ : str= encoded_data[:-padding] lowercase__ : Tuple= "".join( bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: lowercase__ : Tuple= "".join( bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data ) lowercase__ : Any= [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(A ) , 8 ) ] return bytes(A ) if __name__ == "__main__": import doctest doctest.testmod()
85
1
"""simple docstring""" from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. a : Any = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. a : Any = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. a : int = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1000)) def lowercase__(A , A ) ->tuple[str, float]: """simple docstring""" lowercase__ : Dict= len([g for position, g in enumerate(A ) if g == main_target[position]] ) return (item, float(A )) def lowercase__(A , A ) ->tuple[str, str]: """simple docstring""" lowercase__ : int= random.randint(0 , len(A ) - 1 ) lowercase__ : Tuple= parent_a[:random_slice] + parent_a[random_slice:] lowercase__ : Dict= parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def lowercase__(A , A ) ->str: """simple docstring""" lowercase__ : str= list(A ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: lowercase__ : List[Any]= random.choice(A ) return "".join(A ) def lowercase__(A , A , A , ) ->list[str]: """simple docstring""" lowercase__ : Tuple= [] # Generate more children proportionally to the fitness score. lowercase__ : Optional[Any]= int(parent_a[1] * 100 ) + 1 lowercase__ : Optional[Any]= 10 if child_n >= 10 else child_n for _ in range(A ): lowercase__ : Optional[int]= population_score[random.randint(0 , A )][0] lowercase__, lowercase__ : Tuple= crossover(parent_a[0] , A ) # Append new string to the population list. 
pop.append(mutate(A , A ) ) pop.append(mutate(A , A ) ) return pop def lowercase__(A , A , A = True ) ->tuple[int, int, str]: """simple docstring""" if N_POPULATION < N_SELECTED: lowercase__ : Dict= f'''{N_POPULATION} must be bigger than {N_SELECTED}''' raise ValueError(A ) # Verify that the target contains no genes besides the ones inside genes variable. lowercase__ : Any= sorted({c for c in target if c not in genes} ) if not_in_genes_list: lowercase__ : int= f'''{not_in_genes_list} is not in genes list, evolution cannot converge''' raise ValueError(A ) # Generate random starting population. lowercase__ : Dict= [] for _ in range(A ): population.append("".join([random.choice(A ) for i in range(len(A ) )] ) ) # Just some logs to know what the algorithms is doing. lowercase__, lowercase__ : List[Any]= 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(A ) # Random population created. Now it's time to evaluate. # Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. lowercase__ : str= [evaluate(A , A ) for item in population] # Check if there is a matching evolution. lowercase__ : Union[str, Any]= sorted(A , key=lambda A : x[1] , reverse=A ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. 
if debug and generation % 10 == 0: print( f'''\nGeneration: {generation}''' f'''\nTotal Population:{total_population}''' f'''\nBest score: {population_score[0][1]}''' f'''\nBest string: {population_score[0][0]}''' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. lowercase__ : Dict= population[: int(N_POPULATION / 3 )] population.clear() population.extend(A ) # Normalize population score to be between 0 and 1. lowercase__ : str= [ (item, score / len(A )) for item, score in population_score ] # This is selection for i in range(A ): population.extend(select(population_score[int(A )] , A , A ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(A ) > N_POPULATION: break if __name__ == "__main__": a : str = ( """This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!""" ) a : Any = list( """ ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm""" """nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\""" ) a , a , a : int = basic(target_str, genes_list) print( F"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}""" )
85
"""simple docstring""" from __future__ import annotations def lowercase__(A ) ->list[int]: # This function is recursive """simple docstring""" lowercase__ : int= len(A ) # If the array contains only one element, we return it (it's the stop condition of # recursion) if array_length <= 1: return array # Else lowercase__ : str= array[0] lowercase__ : Optional[Any]= False lowercase__ : Any= 1 lowercase__ : list[int]= [] while not is_found and i < array_length: if array[i] < pivot: lowercase__ : Union[str, Any]= True lowercase__ : List[str]= [element for element in array[i:] if element >= array[i]] lowercase__ : Union[str, Any]= longest_subsequence(A ) if len(A ) > len(A ): lowercase__ : List[str]= temp_array else: i += 1 lowercase__ : List[str]= [element for element in array[1:] if element >= pivot] lowercase__ : List[str]= [pivot, *longest_subsequence(A )] if len(A ) > len(A ): return temp_array else: return longest_subseq if __name__ == "__main__": import doctest doctest.testmod()
85
1
"""simple docstring""" import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse("""0.8.3"""): raise Exception("""requires gluonnlp == 0.8.3""") if version.parse(mx.__version__) != version.parse("""1.5.0"""): raise Exception("""requires mxnet == 1.5.0""") logging.set_verbosity_info() a : str = logging.get_logger(__name__) a : Dict = """The Nymphenburg Palace is a beautiful palace in Munich!""" def lowercase__(A , A ) ->List[Any]: """simple docstring""" lowercase__ : Union[str, Any]= { "attention_cell": "multi_head", "num_layers": 4, "units": 1_024, "hidden_size": 768, "max_length": 512, "num_heads": 8, "scaled": True, "dropout": 0.1, "use_residual": True, "embed_size": 1_024, "embed_dropout": 0.1, "word_embed": None, "layer_norm_eps": 1e-5, "token_type_vocab_size": 2, } lowercase__ : int= bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py lowercase__ : List[Any]= BERTEncoder( attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=A , output_all_encodings=A , use_residual=predefined_args["use_residual"] , 
activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , A ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later lowercase__ : List[str]= "openwebtext_ccnews_stories_books_cased" # Specify download folder to Gluonnlp's vocab lowercase__ : Tuple= os.path.join(get_home_dir() , "models" ) lowercase__ : Tuple= _load_vocab(A , A , A , cls=A ) lowercase__ : Any= nlp.model.BERTModel( A , len(A ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=A , use_token_type_embed=A , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=A , use_decoder=A , ) original_bort.load_parameters(A , cast_dtype=A , ignore_extra=A ) lowercase__ : str= original_bort._collect_params_with_prefix() # Build our config 🤗 lowercase__ : Union[str, Any]= { "architectures": ["BertForMaskedLM"], "attention_probs_dropout_prob": predefined_args["dropout"], "hidden_act": "gelu", "hidden_dropout_prob": predefined_args["dropout"], "hidden_size": predefined_args["embed_size"], "initializer_range": 0.02, "intermediate_size": predefined_args["hidden_size"], "layer_norm_eps": predefined_args["layer_norm_eps"], "max_position_embeddings": predefined_args["max_length"], "model_type": "bort", "num_attention_heads": predefined_args["num_heads"], "num_hidden_layers": predefined_args["num_layers"], "pad_token_id": 1, # 2 = BERT, 1 = RoBERTa "type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa "vocab_size": len(A ), } lowercase__ : str= BertConfig.from_dict(A ) lowercase__ : List[str]= BertForMaskedLM(A ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | 
`encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper 
function to convert MXNET Arrays to PyTorch def to_torch(A ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(A , A ): lowercase__ : Optional[int]= hf_param.shape lowercase__ : int= to_torch(params[gluon_param] ) lowercase__ : List[str]= gluon_param.shape assert ( shape_hf == shape_gluon ), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers''' return gluon_param lowercase__ : Union[str, Any]= check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" ) lowercase__ : int= check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" ) lowercase__ : int= check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" ) lowercase__ : Any= check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) lowercase__ : Union[str, Any]= torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): lowercase__ : BertLayer= hf_bort_model.bert.encoder.layer[i] # self attention lowercase__ : BertSelfAttention= layer.attention.self lowercase__ : Any= check_and_map_params( self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' ) lowercase__ : Union[str, Any]= check_and_map_params( self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' ) lowercase__ : str= check_and_map_params( self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' ) lowercase__ : Union[str, Any]= check_and_map_params( self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' ) 
lowercase__ : Tuple= check_and_map_params( self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' ) lowercase__ : Optional[int]= check_and_map_params( self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' ) # self attention output lowercase__ : BertSelfOutput= layer.attention.output lowercase__ : int= check_and_map_params( self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' ) lowercase__ : Union[str, Any]= check_and_map_params( self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' ) lowercase__ : Optional[Any]= check_and_map_params( self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' ) lowercase__ : Tuple= check_and_map_params( self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' ) # intermediate lowercase__ : BertIntermediate= layer.intermediate lowercase__ : Union[str, Any]= check_and_map_params( intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' ) lowercase__ : List[Any]= check_and_map_params( intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' ) # output lowercase__ : BertOutput= layer.output lowercase__ : List[Any]= check_and_map_params( bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' ) lowercase__ : List[Any]= check_and_map_params( bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' ) lowercase__ : Optional[int]= check_and_map_params( bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' ) lowercase__ : Union[str, Any]= check_and_map_params( bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models lowercase__ : Dict= RobertaTokenizer.from_pretrained("roberta-base" ) lowercase__ : Tuple= tokenizer.encode_plus(A 
)["input_ids"] # Get gluon output lowercase__ : Optional[Any]= mx.nd.array([input_ids] ) lowercase__ : Any= original_bort(inputs=A , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(A ) lowercase__ : List[Any]= BertModel.from_pretrained(A ) hf_bort_model.eval() lowercase__ : Optional[int]= tokenizer.encode_plus(A , return_tensors="pt" ) lowercase__ : Tuple= hf_bort_model(**A )[0] lowercase__ : Tuple= output_gluon[0].asnumpy() lowercase__ : Optional[Any]= output_hf[0].detach().numpy() lowercase__ : Tuple= np.max(np.abs(hf_layer - gluon_layer ) ).item() lowercase__ : Optional[int]= np.allclose(A , A , atol=1e-3 ) if success: print("✔️ Both model do output the same tensors" ) else: print("❌ Both model do **NOT** output the same tensors" ) print("Absolute difference is:" , A ) if __name__ == "__main__": a : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) a : Optional[Any] = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
85
"""simple docstring""" import argparse from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline if __name__ == "__main__": a : int = argparse.ArgumentParser() parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--txt2img_unclip""", default="""kakaobrain/karlo-v1-alpha""", type=str, required=False, help="""The pretrained txt2img unclip.""", ) a : List[str] = parser.parse_args() a : List[str] = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip) a : Optional[Any] = CLIPImageProcessor() a : List[str] = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""") a : Tuple = UnCLIPImageVariationPipeline( decoder=txtaimg.decoder, text_encoder=txtaimg.text_encoder, tokenizer=txtaimg.tokenizer, text_proj=txtaimg.text_proj, feature_extractor=feature_extractor, image_encoder=image_encoder, super_res_first=txtaimg.super_res_first, super_res_last=txtaimg.super_res_last, decoder_scheduler=txtaimg.decoder_scheduler, super_res_scheduler=txtaimg.super_res_scheduler, ) imgaimg.save_pretrained(args.dump_path)
85
1
"""simple docstring""" import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers a : Optional[Any] = """python tqdm regex requests packaging filelock numpy tokenizers""".split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append("""dataclasses""") if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append("""importlib_metadata""") for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""") def lowercase__(A , A=None ) ->Optional[int]: """simple docstring""" require_version(deps[pkg] , A )
85
"""simple docstring""" import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, 
load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() a : Optional[Any] = { """bart""": ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), """bert""": ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-uncased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-cased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-base-cased-finetuned-mrpc""": ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """dpr""": ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), """gpt2""": ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlnet""": ( XLNetConfig, 
TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm""": ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm-roberta""": ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """transfo-xl""": ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """openai-gpt""": ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """roberta""": ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """layoutlm""": ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), """roberta-large-mnli""": ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """camembert""": ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """flaubert""": ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert""": ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert-base-distilled-squad""": ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert""": ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert-visual-feature-encoder""": ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """ctrl""": ( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """albert""": ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, 
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """t5""": ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """electra""": ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """wav2vec2""": ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def lowercase__(A , A , A , A , A=False , A=True ) ->Union[str, Any]: """simple docstring""" if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase__, lowercase__, lowercase__, lowercase__ : List[Any]= MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models ) lowercase__ : List[Any]= config_class.from_json_file(A ) lowercase__ : Any= True lowercase__ : List[str]= True print(f'''Building TensorFlow model from configuration: {config}''' ) lowercase__ : Optional[int]= model_class(A ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): lowercase__ : List[str]= cached_file( A , A , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: lowercase__ : Union[str, Any]= load_pytorch_checkpoint_in_tfa_model(A , A ) if compare_with_pt_model: lowercase__ : Any= tf_model(tf_model.dummy_inputs , training=A ) # build the network lowercase__ : Optional[Any]= torch.load(A , map_location="cpu" ) lowercase__ : Union[str, Any]= pt_model_class.from_pretrained( pretrained_model_name_or_path=A , config=A , state_dict=A ) with torch.no_grad(): lowercase__ : str= pt_model(**pt_model.dummy_inputs ) lowercase__ : Tuple= pto[0].numpy() lowercase__ : List[Any]= tfo[0].numpy() lowercase__ : Any= np.amax(np.abs(np_pt - np_tf ) ) print(f'''Max absolute difference between models outputs {diff}''' ) assert diff <= 2e-2, f'''Error, model absolute difference is 
>2e-2: {diff}''' # Save pytorch-model print(f'''Save TensorFlow model to {tf_dump_path}''' ) tf_model.save_weights(A , save_format="h5" ) def lowercase__(A , A , A=None , A=None , A=False , A=False , A=False , A=False , ) ->List[Any]: """simple docstring""" if args_model_type is None: lowercase__ : Tuple= list(MODEL_CLASSES.keys() ) else: lowercase__ : Optional[int]= [args_model_type] for j, model_type in enumerate(A , start=1 ): print("=" * 100 ) print(f''' Converting model type {j}/{len(A )}: {model_type}''' ) print("=" * 100 ) if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ : Optional[int]= MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: lowercase__ : int= list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: lowercase__ : Any= model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(A , A ) , start=1 ): print("-" * 100 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' ) continue lowercase__ : Any= model_shortcut_name elif only_convert_finetuned_models: print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''' ) continue print( f''' Converting checkpoint {i}/{len(A )}: {model_shortcut_name} - model_type {model_type}''' ) print("-" * 100 ) if config_shortcut_name in aws_config_map: lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models ) else: lowercase__ : Union[str, Any]= config_shortcut_name if model_shortcut_name in aws_model_maps: lowercase__ : str= cached_file(A , A , force_download=not use_cached_models ) else: lowercase__ : Any= model_shortcut_name if os.path.isfile(A ): lowercase__ : Dict= "converted_model" 
convert_pt_checkpoint_to_tf( model_type=A , pytorch_checkpoint_path=A , config_file=A , tf_dump_path=os.path.join(A , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=A , ) if remove_cached_files: os.remove(A ) os.remove(A ) if __name__ == "__main__": a : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file.""" ) parser.add_argument( """--model_type""", default=None, type=str, help=( F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """ """convert all the models from AWS.""" ), ) parser.add_argument( """--pytorch_checkpoint_path""", default=None, type=str, help=( """Path to the PyTorch checkpoint path or shortcut name to download from AWS. """ """If not given, will download and convert all the checkpoints from AWS.""" ), ) parser.add_argument( """--config_file""", default=None, type=str, help=( """The config json file corresponding to the pre-trained model. \n""" """This specifies the model architecture. 
If not given and """ """--pytorch_checkpoint_path is not given or is a shortcut name """ """use the configuration associated to the shortcut name on the AWS""" ), ) parser.add_argument( """--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions.""" ) parser.add_argument( """--use_cached_models""", action="""store_true""", help="""Use cached models if possible instead of updating to latest checkpoint versions.""", ) parser.add_argument( """--remove_cached_files""", action="""store_true""", help="""Remove pytorch models after conversion (save memory when converting in batches).""", ) parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""") a : List[str] = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
85
1
"""simple docstring""" from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"] @register_to_config def __init__( self , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = 50257 , snake_case__ = 1024 , snake_case__ = 768 , snake_case__ = 12 , snake_case__ = 12 , snake_case__ = None , snake_case__ = "gelu_new" , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 1e-5 , snake_case__ = 0.02 , snake_case__ = True , snake_case__ = True , snake_case__ = False , snake_case__ = False , ): '''simple docstring''' super().__init__() lowercase__ : str= prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( F'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and''' F''' `n_embd`: {n_embd} are not equal.''' ) lowercase__ : Dict= prefix_inner_dim lowercase__ : Dict= prefix_hidden_dim lowercase__ : int= ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) lowercase__ : Optional[Any]= ( nn.Linear(self.prefix_hidden_dim , snake_case__ ) if self.prefix_hidden_dim is not None else nn.Identity() ) lowercase__ : Union[str, Any]= GPTaConfig( vocab_size=snake_case__ , n_positions=snake_case__ , n_embd=snake_case__ , n_layer=snake_case__ , n_head=snake_case__ , n_inner=snake_case__ , activation_function=snake_case__ , resid_pdrop=snake_case__ , embd_pdrop=snake_case__ , attn_pdrop=snake_case__ , layer_norm_epsilon=snake_case__ , initializer_range=snake_case__ , 
scale_attn_weights=snake_case__ , use_cache=snake_case__ , scale_attn_by_inverse_layer_idx=snake_case__ , reorder_and_upcast_attn=snake_case__ , ) lowercase__ : Optional[int]= GPTaLMHeadModel(snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None , ): '''simple docstring''' lowercase__ : Tuple= self.transformer.transformer.wte(snake_case__ ) lowercase__ : Any= self.encode_prefix(snake_case__ ) lowercase__ : Any= self.decode_prefix(snake_case__ ) lowercase__ : Tuple= torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: lowercase__ : List[str]= self.get_dummy_token(input_ids.shape[0] , input_ids.device ) lowercase__ : Optional[Any]= torch.cat((dummy_token, input_ids) , dim=1 ) lowercase__ : Union[str, Any]= self.transformer(inputs_embeds=snake_case__ , labels=snake_case__ , attention_mask=snake_case__ ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ): '''simple docstring''' return torch.zeros(snake_case__ , self.prefix_length , dtype=torch.intaa , device=snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' return self.encode_prefix(snake_case__ ) @torch.no_grad() def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' lowercase__ : str= torch.split(snake_case__ , 1 , dim=0 ) lowercase__ : Any= [] lowercase__ : Any= [] for feature in features: lowercase__ : Tuple= self.decode_prefix(feature.to(snake_case__ ) ) # back to the clip feature # Only support beam search for now lowercase__, lowercase__ : Any= self.generate_beam( input_embeds=snake_case__ , device=snake_case__ , eos_token_id=snake_case__ ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) lowercase__ : int= torch.stack(snake_case__ ) lowercase__ : int= torch.stack(snake_case__ ) return generated_tokens, generated_seq_lengths 
@torch.no_grad() def UpperCAmelCase_ ( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__ = 5 , snake_case__ = 67 , snake_case__ = 1.0 , snake_case__ = None , ): '''simple docstring''' lowercase__ : List[Any]= eos_token_id lowercase__ : Tuple= None lowercase__ : Dict= None lowercase__ : int= torch.ones(snake_case__ , device=snake_case__ , dtype=torch.int ) lowercase__ : List[str]= torch.zeros(snake_case__ , device=snake_case__ , dtype=torch.bool ) if input_embeds is not None: lowercase__ : Dict= input_embeds else: lowercase__ : int= self.transformer.transformer.wte(snake_case__ ) for i in range(snake_case__ ): lowercase__ : Dict= self.transformer(inputs_embeds=snake_case__ ) lowercase__ : Any= outputs.logits lowercase__ : str= logits[:, -1, :] / (temperature if temperature > 0 else 1.0) lowercase__ : int= logits.softmax(-1 ).log() if scores is None: lowercase__, lowercase__ : int= logits.topk(snake_case__ , -1 ) lowercase__ : Any= generated.expand(snake_case__ , *generated.shape[1:] ) lowercase__, lowercase__ : Tuple= next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: lowercase__ : str= next_tokens else: lowercase__ : Dict= tokens.expand(snake_case__ , *tokens.shape[1:] ) lowercase__ : Tuple= torch.cat((tokens, next_tokens) , dim=1 ) else: lowercase__ : Union[str, Any]= -float(np.inf ) lowercase__ : Optional[int]= 0 lowercase__ : Optional[Any]= scores[:, None] + logits seq_lengths[~is_stopped] += 1 lowercase__ : List[Any]= scores_sum / seq_lengths[:, None] lowercase__, lowercase__ : Tuple= scores_sum_average.view(-1 ).topk(snake_case__ , -1 ) lowercase__ : Tuple= next_tokens // scores_sum.shape[1] lowercase__ : Any= seq_lengths[next_tokens_source] lowercase__ : Any= next_tokens % scores_sum.shape[1] lowercase__ : Tuple= next_tokens.unsqueeze(1 ) lowercase__ : List[str]= tokens[next_tokens_source] lowercase__ : Tuple= torch.cat((tokens, next_tokens) , dim=1 ) lowercase__ : List[str]= generated[next_tokens_source] 
lowercase__ : int= scores_sum_average * seq_lengths lowercase__ : List[str]= is_stopped[next_tokens_source] lowercase__ : Union[str, Any]= self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) lowercase__ : List[str]= torch.cat((generated, next_token_embed) , dim=1 ) lowercase__ : Optional[Any]= is_stopped + next_tokens.eq(snake_case__ ).squeeze() if is_stopped.all(): break lowercase__ : Optional[Any]= scores / seq_lengths lowercase__ : Any= scores.argsort(descending=snake_case__ ) # tokens tensors are already padded to max_seq_length lowercase__ : str= [tokens[i] for i in order] lowercase__ : Optional[Any]= torch.stack(snake_case__ , dim=0 ) lowercase__ : List[str]= torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
85
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import _LazyModule


# Bug fix: the import table was bound to a throwaway name (`a`) while
# `_LazyModule` below references `_import_structure`, raising a NameError at
# import time; the lazy proxy was also assigned to `a` instead of being
# installed in ``sys.modules``, so lazy loading never took effect. The
# TYPE_CHECKING import path is restored to match the table key.
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    # Static type checkers see the real symbol directly.
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    # Replace this module with a lazy proxy so the heavy submodule is only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
85
1
"""simple docstring""" import glob import os import random from string import ascii_lowercase, digits import cva a : Tuple = """""" a : Optional[Any] = """""" a : Optional[int] = """""" a : int = 1 # (0 is vertical, 1 is horizontal) def lowercase__() ->None: """simple docstring""" lowercase__, lowercase__ : List[Any]= get_dataset(A , A ) print("Processing..." ) lowercase__, lowercase__, lowercase__ : List[str]= update_image_and_anno(A , A , A ) for index, image in enumerate(A ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' lowercase__ : Optional[Any]= random_chars(32 ) lowercase__ : str= paths[index].split(os.sep )[-1].rsplit("." , 1 )[0] lowercase__ : str= f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}''' cva.imwrite(f'''/{file_root}.jpg''' , A , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'''Success {index+1}/{len(A )} with {file_name}''' ) lowercase__ : Tuple= [] for anno in new_annos[index]: lowercase__ : str= f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}''' annos_list.append(A ) with open(f'''/{file_root}.txt''' , "w" ) as outfile: outfile.write("\n".join(line for line in annos_list ) ) def lowercase__(A , A ) ->tuple[list, list]: """simple docstring""" lowercase__ : List[str]= [] lowercase__ : List[str]= [] for label_file in glob.glob(os.path.join(A , "*.txt" ) ): lowercase__ : Any= label_file.split(os.sep )[-1].rsplit("." 
, 1 )[0] with open(A ) as in_file: lowercase__ : Any= in_file.readlines() lowercase__ : Any= os.path.join(A , f'''{label_name}.jpg''' ) lowercase__ : List[Any]= [] for obj_list in obj_lists: lowercase__ : Dict= obj_list.rstrip("\n" ).split(" " ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(A ) labels.append(A ) return img_paths, labels def lowercase__(A , A , A = 1 ) ->tuple[list, list, list]: """simple docstring""" lowercase__ : List[str]= [] lowercase__ : Any= [] lowercase__ : List[str]= [] for idx in range(len(A ) ): lowercase__ : str= [] lowercase__ : str= img_list[idx] path_list.append(A ) lowercase__ : Union[str, Any]= anno_list[idx] lowercase__ : str= cva.imread(A ) if flip_type == 1: lowercase__ : List[str]= cva.flip(A , A ) for bbox in img_annos: lowercase__ : List[str]= 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: lowercase__ : int= cva.flip(A , A ) for bbox in img_annos: lowercase__ : Tuple= 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(A ) new_imgs_list.append(A ) return new_imgs_list, new_annos_lists, path_list def lowercase__(A = 32 ) ->str: """simple docstring""" assert number_char > 1, "The number of character should greater than 1" lowercase__ : List[Any]= ascii_lowercase + digits return "".join(random.choice(A ) for _ in range(A ) ) if __name__ == "__main__": main() print("""DONE ✅""")
85
"""simple docstring"""


def harmonic_series(n_term: str) -> list:
    """Return the first *n_term* terms of the harmonic series as strings.

    Bug fix: the function was defined under a placeholder name while the
    demo below calls ``harmonic_series``, and the parameter had been renamed
    so the body's ``n_term`` was unbound — both raised NameError.

    >>> harmonic_series("3")
    ['1', '1/2', '1/3']
    """
    # An empty input yields an empty series (mirrors the interactive prompt).
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        # First term is the plain "1"; every later term is "1/k".
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
85
1
"""simple docstring"""
from PIL import Image


def change_contrast(img: Image, level: int) -> Image:
    """Return a copy of *img* with its contrast adjusted by *level*.

    Bug fix: the function was defined under a placeholder name while the
    demo below calls ``change_contrast``, and the demo bound the result to a
    throwaway name while saving ``cont_img`` — both raised NameError.
    """
    # Standard contrast factor; level in (-259, 259), positive increases contrast.
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        # Remap each channel value around mid-gray (128).
        return int(128 + factor * (c - 128))

    # Image.point applies the mapping to every pixel channel.
    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
85
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Released BigBird checkpoints -> hosted config files.
BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class BigBirdConfig(PretrainedConfig):
    """Configuration class holding the hyper-parameters of a BigBird model.

    Bug fixes relative to the previous revision: every ``__init__`` parameter
    was named ``snake_case__`` (duplicate argument names are a SyntaxError),
    both classes in this module shared one placeholder name (the second
    silently shadowed the first), and base classes / module constants were
    bound to throwaway names. Names and defaults are restored to the
    documented BigBird configuration.
    """

    # Model-type key used by the auto classes.
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        """Store hyper-parameters; special-token ids go to the base config."""
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        # BigBird-specific sparse-attention settings.
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    """ONNX export configuration for BigBird."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis specification for the exported graph's inputs."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
85
1
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Bug fix: the import table was bound to a throwaway name (`a`) while
# `_LazyModule` references `_import_structure` (NameError); the torch-only
# symbol list was never merged into the table; and the lazy proxy was
# assigned to `a` instead of being installed in ``sys.modules``.
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is absent: expose only the config/processing symbols.
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers resolve the real symbols eagerly.
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
85
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available


# The VQ-Diffusion pipeline depends on both PyTorch and transformers;
# only re-export its symbols when both libraries are importable.
if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
85
1
"""simple docstring"""
import os

# Roman digit values; the parser and generator below both use this table.
# Bug fix: the table and the three functions were bound to placeholder names
# (`a`, `lowercase__`) while the bodies call ``SYMBOLS``,
# ``parse_roman_numerals`` and ``generate_roman_numerals`` — NameError at
# runtime. ``os.path.dirname`` is also restored to take ``__file__``.
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a (possibly non-minimal) Roman numeral string to an integer.

    A digit smaller than its successor is subtracted (IV -> 4);
    otherwise it is added.
    """
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    # The final digit is always additive.
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    """Generate the minimal Roman numeral representation of *num*."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Project Euler 89: characters saved by rewriting each numeral in the
    data file in its minimal form."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)
    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
85
"""simple docstring""" from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def lowercase__(A , A ) ->List[Any]: """simple docstring""" lowercase__ : str= [] for part_id in partition_order: lowercase__ : int= df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect() for row_idx, row in enumerate(A ): expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->str: """simple docstring""" lowercase__ : Optional[Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Tuple= spark.range(100 ).repartition(1 ) lowercase__ : Dict= Spark(A ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=16 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 50 @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->Tuple: """simple docstring""" lowercase__ : Union[str, Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Dict= spark.range(10 ).repartition(2 ) lowercase__ : Optional[Any]= [1, 0] lowercase__ : List[str]= _generate_iterable_examples(A , A ) # Reverse the partitions. 
lowercase__ : int= _get_expected_row_ids_and_row_dicts_for_partition_order(A , A ) for i, (row_id, row_dict) in enumerate(generate_fn() ): lowercase__, lowercase__ : Any= expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->int: """simple docstring""" lowercase__ : int= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Dict= spark.range(10 ).repartition(1 ) lowercase__ : str= SparkExamplesIterable(A ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(A ): assert row_id == f'''0_{i}''' assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->str: """simple docstring""" lowercase__ : List[str]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : int= spark.range(30 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. 
with patch("numpy.random.Generator" ) as generator_mock: lowercase__ : Optional[Any]= lambda A : x.reverse() lowercase__ : Tuple= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [2, 1, 0] ) lowercase__ : List[str]= SparkExamplesIterable(A ).shuffle_data_sources(A ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(A ): lowercase__, lowercase__ : str= expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->Any: """simple docstring""" lowercase__ : Dict= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Union[str, Any]= spark.range(20 ).repartition(4 ) # Partitions 0 and 2 lowercase__ : Optional[int]= SparkExamplesIterable(A ).shard_data_sources(worker_id=0 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowercase__ : Union[str, Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [0, 2] ) for i, (row_id, row_dict) in enumerate(A ): lowercase__, lowercase__ : Tuple= expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 lowercase__ : Tuple= SparkExamplesIterable(A ).shard_data_sources(worker_id=1 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowercase__ : List[Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [1, 3] ) for i, (row_id, row_dict) in enumerate(A ): lowercase__, lowercase__ : Dict= expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->Tuple: """simple docstring""" lowercase__ : Any= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Tuple= spark.range(100 ).repartition(1 ) lowercase__ : Optional[int]= Spark(A ) # Choose a small max_shard_size for maximum partitioning. 
spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 100
85
1
"""simple docstring"""


def factorial(digit: int) -> int:
    """Recursive factorial with the convention 0! == 1! == 1."""
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    """Return True if *number* equals the sum of the factorials of its digits.

    Example: 145 = 1! + 4! + 5!.

    Bug fix: both functions were defined under the same placeholder name
    (the second shadowed the first) while the bodies call ``factorial`` and
    the demo calls ``krishnamurthy`` — both raised NameError.
    """
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        # Peel off the least-significant digit each iteration.
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


if __name__ == "__main__":
    print("Program to check whether a number is a Krisnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
    print(
        f"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."
    )
85
"""simple docstring""" import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=2 , snake_case__=99 , snake_case__=0 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=12 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__="last" , snake_case__=None , snake_case__=None , ): '''simple docstring''' lowercase__ : Optional[int]= parent lowercase__ : Tuple= batch_size lowercase__ : Tuple= seq_length lowercase__ : str= is_training lowercase__ : str= use_input_lengths lowercase__ : Any= use_token_type_ids lowercase__ : List[Any]= use_labels lowercase__ : Optional[int]= gelu_activation lowercase__ : str= sinusoidal_embeddings lowercase__ : List[str]= causal lowercase__ : Any= asm lowercase__ : Optional[int]= n_langs lowercase__ : Union[str, Any]= vocab_size lowercase__ : int= n_special lowercase__ : Any= hidden_size lowercase__ : int= num_hidden_layers lowercase__ : 
List[str]= num_attention_heads lowercase__ : List[str]= hidden_dropout_prob lowercase__ : str= attention_probs_dropout_prob lowercase__ : Any= max_position_embeddings lowercase__ : List[Any]= type_vocab_size lowercase__ : int= type_sequence_label_size lowercase__ : Any= initializer_range lowercase__ : Optional[int]= num_labels lowercase__ : Union[str, Any]= num_choices lowercase__ : List[Any]= summary_type lowercase__ : Optional[int]= use_proj lowercase__ : int= scope def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : Dict= random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : Tuple= None if self.use_input_lengths: lowercase__ : List[Any]= ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowercase__ : Tuple= None if self.use_token_type_ids: lowercase__ : Any= ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowercase__ : str= None lowercase__ : Tuple= None lowercase__ : Dict= None if self.use_labels: lowercase__ : Optional[Any]= ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : Optional[Any]= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase__ : Tuple= ids_tensor([self.batch_size] , 2 ).float() lowercase__ : Tuple= ids_tensor([self.batch_size] , self.num_choices ) lowercase__ : List[Any]= self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def UpperCAmelCase_ ( self ): '''simple docstring''' return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , 
sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : Any= FlaubertModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : str= model(snake_case__ , lengths=snake_case__ , langs=snake_case__ ) lowercase__ : str= model(snake_case__ , langs=snake_case__ ) lowercase__ : Any= model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : str= FlaubertWithLMHeadModel(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Optional[Any]= model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : str= FlaubertForQuestionAnsweringSimple(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : List[str]= model(snake_case__ ) lowercase__ : Dict= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) 
) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : List[Any]= FlaubertForQuestionAnswering(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Dict= model(snake_case__ ) lowercase__ : Any= model( snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , p_mask=snake_case__ , ) lowercase__ : List[str]= model( snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , ) ((lowercase__), ) : Optional[Any]= result_with_labels.to_tuple() lowercase__ : Union[str, Any]= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ ) ((lowercase__), ) : List[Any]= result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : List[str]= FlaubertForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Optional[Any]= model(snake_case__ ) lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.loss.shape , () ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : List[Any]= self.num_labels lowercase__ : Union[str, Any]= FlaubertForTokenClassification(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : int= model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : int= self.num_choices lowercase__ : str= FlaubertForMultipleChoice(config=snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Dict= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : int= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : str= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : Any= model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.prepare_config_and_inputs() ( ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ) : Any= config_and_inputs lowercase__ : Tuple= { "input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths, "attention_mask": input_mask, } return config, inputs_dict @require_torch class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" __lowerCamelCase = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) __lowerCamelCase = ( { "feature-extraction": FlaubertModel, "fill-mask": FlaubertWithLMHeadModel, "question-answering": FlaubertForQuestionAnsweringSimple, "text-classification": FlaubertForSequenceClassification, "token-classification": FlaubertForTokenClassification, "zero-shot": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' lowercase__ : Tuple= super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": lowercase__ : List[Any]= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) lowercase__ : List[str]= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) return inputs_dict def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= FlaubertModelTester(self ) lowercase__ : List[str]= ConfigTester(self , config_class=snake_case__ , emb_dim=37 ) def UpperCAmelCase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ ) def UpperCAmelCase_ ( self ): 
'''simple docstring''' lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*snake_case__ ) @slow def UpperCAmelCase_ ( self ): '''simple docstring''' for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : List[str]= FlaubertModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) @slow @require_torch_gpu def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__, lowercase__ : Optional[Any]= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. if model_class == FlaubertForMultipleChoice: return lowercase__ : int= True lowercase__ : List[Any]= model_class(config=snake_case__ ) lowercase__ : str= self._prepare_for_class(snake_case__ , snake_case__ ) lowercase__ : Dict= torch.jit.trace( snake_case__ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(snake_case__ , os.path.join(snake_case__ , "traced_model.pt" ) ) lowercase__ : str= torch.jit.load(os.path.join(snake_case__ , "traced_model.pt" ) , map_location=snake_case__ ) loaded(inputs_dict["input_ids"].to(snake_case__ ) , inputs_dict["attention_mask"].to(snake_case__ ) ) @require_torch class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" ) lowercase__ : Tuple= torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) with torch.no_grad(): lowercase__ : Optional[int]= model(snake_case__ )[0] lowercase__ : Optional[int]= torch.Size((1, 11, 768) 
) self.assertEqual(output.shape , snake_case__ ) lowercase__ : Dict= torch.tensor( [[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
85
1
"""Morphological dilation of a binary image, implemented from scratch with NumPy.

NOTE(review): the previous text of this file was corrupted by an obfuscation
pass -- all three helpers were named ``lowercase__`` (each shadowing the
previous) while the driver script called ``rgb_to_gray`` / ``gray_to_binary``
/ ``dilation``, and the assignment targets inside the bodies were lost.  The
names below are restored from the call sites.
"""
from pathlib import Path

import numpy as np


def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Return the grayscale (luma) image of an RGB array.

    Uses the ITU-R BT.601 weights 0.2989 R + 0.5870 G + 0.1140 B.
    """
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image into a boolean mask.

    A pixel is foreground when its value lies in the half-open range (127, 255].
    """
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Morphologically dilate ``image`` with the structuring element ``kernel``.

    An output pixel is 1 when the kernel, placed over the corresponding input
    position, overlaps at least one non-zero input pixel; otherwise 0.
    """
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image into the padded canvas so the kernel can overhang the borders.
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # Imported lazily so the pure-NumPy helpers above stay usable without Pillow.
    from PIL import Image

    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
85
"""Stochastic sampler for variance-expanding diffusion models (Karras et al.
2022, Algorithm 2).

NOTE(review): the previous text declared every parameter of every method as
``snake_case__`` (duplicate-argument SyntaxError) and dropped the
``self.<attr> =`` assignment targets.  The names below are restored from the
values the bodies actually read (``self.config.sigma_max``,
``self.num_inference_steps``, ...).
"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
class KarrasVeOutput(BaseOutput):
    """Output of a scheduler step.

    prev_sample           -- computed sample x_{t-1}, next model input
    derivative            -- derivative of the sample w.r.t. sigma at this step
    pred_original_sample  -- predicted fully denoised sample x_0
    """

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """Second-order (Heun) stochastic sampler with Langevin-like "churn"."""

    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(
        self, sample: torch.FloatTensor, timestep: Optional[int] = None
    ) -> torch.FloatTensor:
        """No-op scaling; present for API interchangeability with other schedulers."""
        return sample

    def set_timesteps(
        self, num_inference_steps: int, device: Union[str, torch.device] = None
    ):
        """Set the discrete timesteps and the geometric sigma schedule."""
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        # Geometric interpolation between sigma_max and sigma_min.
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2)
                ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self,
        sample: torch.FloatTensor,
        sigma: float,
        generator: Optional[torch.Generator] = None,
    ) -> Tuple[torch.FloatTensor, float]:
        """Raise the noise level from sigma to sigma_hat by injecting gamma noise.

        Returns the noisier sample and its new sigma_hat.
        """
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(
            sample.device
        )
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ):
        """First-order (Euler) prediction step from sigma_hat to sigma_prev."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev,
            derivative=derivative,
            pred_original_sample=pred_original_sample,
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ):
        """Second-order (Heun) correction: average the two slope estimates."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (
            0.5 * derivative + 0.5 * derivative_corr
        )

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev,
            derivative=derivative,
            pred_original_sample=pred_original_sample,
        )

    def add_noise(self, original_samples, noise, timesteps):
        # Training-time noising is not supported by this sampler.
        raise NotImplementedError()
85
1
"""Fine-tune (or train from scratch) library models for language modeling:
causal LM (GPT-style), masked LM (BERT-style), or permutation LM (XLNet).

NOTE(review): the previous text was corrupted by an obfuscation pass --
``get_dataset`` declared duplicate ``A`` parameters (a SyntaxError), every
dataclass field name was erased, and the assignment targets inside ``main``
were dropped.  Names below are restored from the attributes the code reads
(``args.line_by_line``, ``model_args.cache_dir``, ...).
"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional

from torch.utils.data import ConcatDataset

import transformers
from transformers import (
    CONFIG_MAPPING,
    MODEL_WITH_LM_HEAD_MAPPING,
    AutoConfig,
    AutoModelWithLMHead,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    DataCollatorForWholeWordMask,
    HfArgumentParser,
    LineByLineTextDataset,
    LineByLineWithRefDataset,
    PreTrainedTokenizer,
    TextDataset,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we fine-tune or train from scratch."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data the model is trained/evaluated on."""

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether ot not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    """Build the train or eval dataset described by ``args``.

    Supports line-by-line datasets (optionally with Chinese whole-word-mask
    reference files), contiguous-block TextDatasets, and globbed multi-file
    training sets concatenated together.
    """

    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        # NOTE(review): the corrupted text called ``_dataset(A)`` here; each
        # globbed file ``f`` is what must be wrapped.
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)


def main():
    """Parse arguments, build model/tokenizer/datasets, then train and/or evaluate."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer.
    # The .from_pretrained methods guarantee that only one local process can
    # concurrently download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it,and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            "--mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len  # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir)
        if training_args.do_train
        else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info(" %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results


def _mp_fn(index):
    # Entry point for xla_spawn (TPU multiprocessing).
    main()


if __name__ == "__main__":
    main()
85
"""Configuration wrapper that grafts multimodal fields onto a text-model config.

NOTE(review): the previous text declared ``__init__`` with three parameters
all named ``snake_case__`` (a duplicate-argument SyntaxError) and dropped the
``self.`` assignment targets.  Parameter names are restored from the body's
own references (``config``, ``modal_hidden_size``, ``num_labels``); the class
name is restored to the MMBT configuration this deprecated module wraps --
TODO confirm against the module's callers.
"""
from ....utils import logging


logger = logging.get_logger(__name__)


class MMBTConfig:
    """Store the configuration of an MMBT model.

    Copies all attributes of an existing text-model ``config`` onto this
    object, then adds the modality-encoder hidden size and, optionally, the
    number of classification labels.
    """

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        # Adopt every attribute of the wrapped text config wholesale.
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
85
1
"""Shared fixtures and save/load checks for the DeepFloyd-IF pipeline tests.

NOTE(review): the previous text named all four methods ``UpperCAmelCase_``
(so only the last definition survived), mangled the digit-bearing import
names (``TaEncoderModel`` -> ``T5EncoderModel``, ``UNetaDConditionModel`` ->
``UNet2DConditionModel``), and dropped the assignment targets.  Names are
restored from what the bodies construct and read.
"""
import tempfile

import numpy as np
import torch

from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device

from ..test_pipelines_common import to_np


class IFPipelineTesterMixin:
    """Mixin providing tiny dummy components and save/load round-trip checks
    for IF pipeline test classes (which supply ``pipeline_class``,
    ``get_dummy_components`` and ``get_dummy_inputs``)."""

    def _get_dummy_components(self):
        """Tiny stage-1 (base) IF components, seeded for reproducibility."""
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _get_superresolution_dummy_components(self):
        """Tiny stage-2 (super-resolution) IF components, seeded for reproducibility."""
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _test_save_load_optional_components(self):
        """Round-trip save/load with all optional components set to None and the
        prompt pre-encoded; outputs must match within 1e-4."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def _test_save_load_local(self):
        """Plain round-trip save/load; outputs must match within 1e-4."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
85
"""Convert original CvT checkpoints to the Hugging Face format.

NOTE(review): the previous text was corrupted by an obfuscation pass -- the
helper functions all shared the name ``lowercase__`` while the converter
called ``cls_token`` / ``embeddings`` / ``attention`` / ``final``,
``attention`` declared duplicate ``A`` parameters (a SyntaxError), and the
dict-assignment target in the weight-copy loop was dropped.  The highly
repetitive key-pair ``append`` blocks are rebuilt as order-preserving loops.
"""
import argparse
import json
from collections import OrderedDict

import torch
from huggingface_hub import cached_download, hf_hub_url

from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification


def embeddings(idx):
    """(HF key, original key) pairs for the patch embedding of stage ``idx``."""
    embed = []
    for hf_suffix, orig_suffix in (
        ("projection.weight", "proj.weight"),
        ("projection.bias", "proj.bias"),
        ("normalization.weight", "norm.weight"),
        ("normalization.bias", "norm.bias"),
    ):
        embed.append(
            (
                f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.{hf_suffix}",
                f"stage{idx}.patch_embed.{orig_suffix}",
            )
        )
    return embed


def attention(idx, cnt):
    """(HF key, original key) pairs for transformer block ``cnt`` of stage ``idx``.

    Pair order matches the original hand-written list: depth-wise conv q/k/v
    projections (conv weight + batch-norm stats), then linear q/k/v, attention
    output, MLP, and the two layer norms.
    """
    hf_prefix = f"cvt.encoder.stages.{idx}.layers.{cnt}"
    orig_prefix = f"stage{idx}.blocks.{cnt}"
    attention_weights = []

    # Convolutional q/k/v projections, including batch-norm statistics.
    for proj, short in (("query", "q"), ("key", "k"), ("value", "v")):
        attention_weights.append(
            (
                f"{hf_prefix}.attention.attention.convolution_projection_{proj}.convolution_projection.convolution.weight",
                f"{orig_prefix}.attn.conv_proj_{short}.conv.weight",
            )
        )
        for stat in ("weight", "bias", "running_mean", "running_var", "num_batches_tracked"):
            attention_weights.append(
                (
                    f"{hf_prefix}.attention.attention.convolution_projection_{proj}.convolution_projection.normalization.{stat}",
                    f"{orig_prefix}.attn.conv_proj_{short}.bn.{stat}",
                )
            )

    # Linear q/k/v projections.
    for proj, short in (("query", "q"), ("key", "k"), ("value", "v")):
        for p in ("weight", "bias"):
            attention_weights.append(
                (
                    f"{hf_prefix}.attention.attention.projection_{proj}.{p}",
                    f"{orig_prefix}.attn.proj_{short}.{p}",
                )
            )

    # Attention output projection, MLP, and layer norms.
    for hf_mid, orig_mid in (
        ("attention.output.dense", "attn.proj"),
        ("intermediate.dense", "mlp.fc1"),
        ("output.dense", "mlp.fc2"),
        ("layernorm_before", "norm1"),
        ("layernorm_after", "norm2"),
    ):
        for p in ("weight", "bias"):
            attention_weights.append((f"{hf_prefix}.{hf_mid}.{p}", f"{orig_prefix}.{orig_mid}.{p}"))

    return attention_weights


def cls_token(idx):
    """(HF key, original key) pair for the stage's CLS token.

    NOTE(review): the original key is hard-coded to ``stage2`` (only the last
    stage of CvT carries a CLS token) -- preserved as-is.
    """
    token = []
    token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
    return token


def final():
    """(HF key, original key) pairs for the final norm and classifier head."""
    head = []
    head.append(("layernorm.weight", "norm.weight"))
    head.append(("layernorm.bias", "norm.bias"))
    head.append(("classifier.weight", "head.weight"))
    head.append(("classifier.bias", "head.bias"))
    return head


def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Build a CvT config from the model name, remap the original state dict
    to HF keys, and save model + image processor to ``pytorch_dump_folder_path``."""
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    # NOTE(review): the corrupted text only showed ``= image_size``; restoring
    # the processor's resize target -- confirm against the processor schema.
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)


# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Input Image Size",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
85
1
"""simple docstring""" def lowercase__(A = 1_000 ) ->int: """simple docstring""" lowercase__ : Union[str, Any]= 2**power lowercase__ : Dict= str(A ) lowercase__ : List[str]= list(A ) lowercase__ : Union[str, Any]= 0 for i in list_num: sum_of_num += int(A ) return sum_of_num if __name__ == "__main__": a : Any = int(input("""Enter the power of 2: """).strip()) print("""2 ^ """, power, """ = """, 2**power) a : Union[str, Any] = solution(power) print("""Sum of the digits is: """, result)
85
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import ScoreSdeVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = 42 __lowerCamelCase = 42 def __init__( self , snake_case__ , snake_case__ ): '''simple docstring''' super().__init__() self.register_modules(unet=snake_case__ , scheduler=snake_case__ ) @torch.no_grad() def __call__( self , snake_case__ = 1 , snake_case__ = 2000 , snake_case__ = None , snake_case__ = "pil" , snake_case__ = True , **snake_case__ , ): '''simple docstring''' lowercase__ : Optional[Any]= self.unet.config.sample_size lowercase__ : Dict= (batch_size, 3, img_size, img_size) lowercase__ : List[Any]= self.unet lowercase__ : Tuple= randn_tensor(snake_case__ , generator=snake_case__ ) * self.scheduler.init_noise_sigma lowercase__ : Tuple= sample.to(self.device ) self.scheduler.set_timesteps(snake_case__ ) self.scheduler.set_sigmas(snake_case__ ) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): lowercase__ : Optional[Any]= self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device ) # correction step for _ in range(self.scheduler.config.correct_steps ): lowercase__ : List[Any]= self.unet(snake_case__ , snake_case__ ).sample lowercase__ : List[Any]= self.scheduler.step_correct(snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample # prediction step lowercase__ : List[str]= model(snake_case__ , snake_case__ ).sample lowercase__ : Tuple= self.scheduler.step_pred(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ) lowercase__, lowercase__ : Tuple= output.prev_sample, output.prev_sample_mean lowercase__ : List[str]= sample_mean.clamp(0 , 1 ) lowercase__ : Union[str, Any]= sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowercase__ : 
str= self.numpy_to_pil(snake_case__ ) if not return_dict: return (sample,) return ImagePipelineOutput(images=snake_case__ )
85
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) a : List[Any] = { """configuration_wav2vec2""": ["""WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Wav2Vec2Config"""], """feature_extraction_wav2vec2""": ["""Wav2Vec2FeatureExtractor"""], """processing_wav2vec2""": ["""Wav2Vec2Processor"""], """tokenization_wav2vec2""": ["""Wav2Vec2CTCTokenizer""", """Wav2Vec2Tokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : int = [ """WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""", """Wav2Vec2ForAudioFrameClassification""", """Wav2Vec2ForCTC""", """Wav2Vec2ForMaskedLM""", """Wav2Vec2ForPreTraining""", """Wav2Vec2ForSequenceClassification""", """Wav2Vec2ForXVector""", """Wav2Vec2Model""", """Wav2Vec2PreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Optional[int] = [ """TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFWav2Vec2ForCTC""", """TFWav2Vec2Model""", """TFWav2Vec2PreTrainedModel""", """TFWav2Vec2ForSequenceClassification""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : List[str] = [ """FlaxWav2Vec2ForCTC""", """FlaxWav2Vec2ForPreTraining""", """FlaxWav2Vec2Model""", """FlaxWav2Vec2PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .processing_wavaveca import WavaVecaProcessor from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavaveca import ( 
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaForAudioFrameClassification, WavaVecaForCTC, WavaVecaForMaskedLM, WavaVecaForPreTraining, WavaVecaForSequenceClassification, WavaVecaForXVector, WavaVecaModel, WavaVecaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWavaVecaForCTC, TFWavaVecaForSequenceClassification, TFWavaVecaModel, TFWavaVecaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( FlaxWavaVecaForCTC, FlaxWavaVecaForPreTraining, FlaxWavaVecaModel, FlaxWavaVecaPreTrainedModel, ) else: import sys a : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
85
"""simple docstring""" def lowercase__(A ) ->list[int]: """simple docstring""" lowercase__ : List[str]= len(A ) for i in range(A ): for j in range(i + 1 , A ): if numbers[j] < numbers[i]: lowercase__, lowercase__ : List[str]= numbers[j], numbers[i] return numbers if __name__ == "__main__": a : Dict = input("""Enter numbers separated by a comma:\n""").strip() a : List[str] = [int(item) for item in user_input.split(""",""")] print(exchange_sort(unsorted))
85
1
"""simple docstring""" from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time a : List[str] = Lock() def lowercase__(A , A , A , A , A , A , A ) ->Dict: """simple docstring""" global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 10 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(A ) process_lock.release() # receive your right neighbor's value process_lock.acquire() lowercase__ : Dict= rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left lowercase__ : Union[str, Any]= min(A , A ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(A ) process_lock.release() # receive your left neighbor's value process_lock.acquire() lowercase__ : Optional[Any]= lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right lowercase__ : str= max(A , A ) # after all swaps are performed, send the values back to main result_pipe[1].send(A ) def lowercase__(A ) ->Optional[Any]: """simple docstring""" lowercase__ : Dict= [] lowercase__ : Any= [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop lowercase__ : List[str]= Pipe() lowercase__ : List[Any]= Pipe() process_array_.append( Process( target=A , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) lowercase__ : List[Any]= temp_rs lowercase__ : Dict= temp_rr for i in range(1 , len(A ) - 1 ): lowercase__ : Optional[int]= Pipe() lowercase__ : Optional[int]= Pipe() 
process_array_.append( Process( target=A , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) lowercase__ : int= temp_rs lowercase__ : Any= temp_rr process_array_.append( Process( target=A , args=( len(A ) - 1, arr[len(A ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(A ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(A ) ): lowercase__ : List[str]= result_pipe[p][0].recv() process_array_[p].join() return arr def lowercase__() ->List[Any]: """simple docstring""" lowercase__ : Optional[int]= list(range(10 , 0 , -1 ) ) print("Initial List" ) print(*A ) lowercase__ : List[Any]= odd_even_transposition(A ) print("Sorted List\n" ) print(*A ) if __name__ == "__main__": main()
85
"""simple docstring""" import math from collections.abc import Iterator from itertools import takewhile def lowercase__(A ) ->bool: """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowercase__() ->Iterator[int]: """simple docstring""" lowercase__ : Union[str, Any]= 2 while True: if is_prime(A ): yield num num += 1 def lowercase__(A = 2_000_000 ) ->int: """simple docstring""" return sum(takewhile(lambda A : x < n , prime_generator() ) ) if __name__ == "__main__": print(F"""{solution() = }""")
85
1
"""simple docstring""" import math from collections.abc import Iterator from itertools import takewhile def lowercase__(A ) ->bool: """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowercase__() ->Iterator[int]: """simple docstring""" lowercase__ : Union[str, Any]= 2 while True: if is_prime(A ): yield num num += 1 def lowercase__(A = 2_000_000 ) ->int: """simple docstring""" return sum(takewhile(lambda A : x < n , prime_generator() ) ) if __name__ == "__main__": print(F"""{solution() = }""")
85
"""simple docstring""" def lowercase__(A ) ->bool: """simple docstring""" lowercase__ : Tuple= (1 + 24 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def lowercase__(A = 5_000 ) ->int: """simple docstring""" lowercase__ : str= [(i * (3 * i - 1)) // 2 for i in range(1 , A )] for i, pentagonal_i in enumerate(A ): for j in range(A , len(A ) ): lowercase__ : List[Any]= pentagonal_nums[j] lowercase__ : int= pentagonal_i + pentagonal_j lowercase__ : Optional[int]= pentagonal_j - pentagonal_i if is_pentagonal(A ) and is_pentagonal(A ): return b return -1 if __name__ == "__main__": print(F"""{solution() = }""")
85
1
"""simple docstring""" from argparse import ArgumentParser from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand a : Any = logging.get_logger(__name__) # pylint: disable=invalid-name def lowercase__(A ) ->Any: """simple docstring""" if not path: return "pipe" for ext in PipelineDataFormat.SUPPORTED_FORMATS: if path.endswith(A ): return ext raise Exception( f'''Unable to determine file format from file extension {path}. ''' f'''Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}''' ) def lowercase__(A ) ->Tuple: """simple docstring""" lowercase__ : List[str]= pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) lowercase__ : Union[str, Any]= try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format lowercase__ : Tuple= PipelineDataFormat.from_str( format=A , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , ) return RunCommand(A , A ) class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ ): '''simple docstring''' lowercase__ : Any= nlp lowercase__ : Union[str, Any]= reader @staticmethod def UpperCAmelCase_ ( snake_case__ ): '''simple docstring''' lowercase__ : Union[str, Any]= parser.add_parser("run" , help="Run a pipeline through the CLI" ) run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run" ) run_parser.add_argument("--input" , type=snake_case__ , help="Path to the file to use for inference" ) run_parser.add_argument("--output" , type=snake_case__ , help="Path to the file that will be used post to write results." ) run_parser.add_argument("--model" , type=snake_case__ , help="Name or path to the model to instantiate." 
) run_parser.add_argument("--config" , type=snake_case__ , help="Name or path to the model's config to instantiate." ) run_parser.add_argument( "--tokenizer" , type=snake_case__ , help="Name of the tokenizer to use. (default: same as the model name)" ) run_parser.add_argument( "--column" , type=snake_case__ , help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" , ) run_parser.add_argument( "--format" , type=snake_case__ , default="infer" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="Input format to read from" , ) run_parser.add_argument( "--device" , type=snake_case__ , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , ) run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file." ) run_parser.set_defaults(func=snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__, lowercase__ : Union[str, Any]= self._nlp, [] for entry in self._reader: lowercase__ : Optional[Any]= nlp(**snake_case__ ) if self._reader.is_multi_columns else nlp(snake_case__ ) if isinstance(snake_case__ , snake_case__ ): outputs.append(snake_case__ ) else: outputs += output # Saving data if self._nlp.binary_output: lowercase__ : str= self._reader.save_binary(snake_case__ ) logger.warning(F'''Current pipeline requires output to be in binary format, saving at {binary_path}''' ) else: self._reader.save(snake_case__ )
85
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging a : List[str] = logging.get_logger(__name__) a : Union[str, Any] = { """google/pix2struct-textcaps-base""": ( """https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json""" ), } class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = "pix2struct_text_model" __lowerCamelCase = ["past_key_values"] __lowerCamelCase = { "hidden_size": "hidden_size", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self , snake_case__=50244 , snake_case__=768 , snake_case__=64 , snake_case__=2048 , snake_case__=12 , snake_case__=12 , snake_case__=32 , snake_case__=128 , snake_case__=0.1 , snake_case__=1e-6 , snake_case__=1.0 , snake_case__="gelu_new" , snake_case__=0 , snake_case__=False , snake_case__=0 , snake_case__=1 , snake_case__=False , snake_case__=True , **snake_case__ , ): '''simple docstring''' lowercase__ : int= vocab_size lowercase__ : Optional[Any]= hidden_size lowercase__ : Tuple= d_kv lowercase__ : Optional[int]= d_ff lowercase__ : Any= num_layers lowercase__ : Dict= num_heads lowercase__ : List[Any]= relative_attention_num_buckets lowercase__ : Optional[Any]= relative_attention_max_distance lowercase__ : Dict= dropout_rate lowercase__ : Tuple= layer_norm_epsilon lowercase__ : str= initializer_factor lowercase__ : Any= use_cache lowercase__ : Optional[int]= eos_token_id lowercase__ : str= decoder_start_token_id # for backwards compatibility lowercase__ : Optional[Any]= dense_act_fn super().__init__( pad_token_id=snake_case__ , eos_token_id=snake_case__ , decoder_start_token_id=snake_case__ , tie_word_embeddings=snake_case__ , is_decoder=snake_case__ , **snake_case__ , ) @classmethod def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ): '''simple docstring''' cls._set_token_in_kwargs(snake_case__ ) lowercase__, 
lowercase__ : str= cls.get_config_dict(snake_case__ , **snake_case__ ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get("model_type" ) == "pix2struct": lowercase__ : str= config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(snake_case__ , **snake_case__ ) class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = "pix2struct_vision_model" def __init__( self , snake_case__=768 , snake_case__=768 , snake_case__=2048 , snake_case__=64 , snake_case__=12 , snake_case__=12 , snake_case__="gelu_new" , snake_case__=1e-6 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=1e-10 , snake_case__=1.0 , snake_case__=4096 , snake_case__=32 , snake_case__=128 , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) lowercase__ : Tuple= hidden_size lowercase__ : Tuple= patch_embed_hidden_size lowercase__ : Optional[Any]= d_ff lowercase__ : Dict= dropout_rate lowercase__ : Any= num_hidden_layers lowercase__ : Optional[int]= num_attention_heads lowercase__ : Dict= initializer_range lowercase__ : Tuple= initializer_factor lowercase__ : Tuple= attention_dropout lowercase__ : Optional[Any]= layer_norm_eps lowercase__ : List[Any]= dense_act_fn lowercase__ : str= seq_len lowercase__ : List[str]= relative_attention_num_buckets lowercase__ : Union[str, Any]= relative_attention_max_distance lowercase__ : Dict= d_kv @classmethod def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ): '''simple docstring''' cls._set_token_in_kwargs(snake_case__ ) lowercase__, lowercase__ : int= cls.get_config_dict(snake_case__ , **snake_case__ ) # get the vision config dict if we are loading 
from Pix2StructConfig if config_dict.get("model_type" ) == "pix2struct": lowercase__ : Union[str, Any]= config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(snake_case__ , **snake_case__ ) class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = "pix2struct" __lowerCamelCase = True def __init__( self , snake_case__=None , snake_case__=None , snake_case__=1.0 , snake_case__=0.02 , snake_case__=False , snake_case__=False , snake_case__=True , **snake_case__ , ): '''simple docstring''' super().__init__(tie_word_embeddings=snake_case__ , is_encoder_decoder=snake_case__ , **snake_case__ ) if text_config is None: lowercase__ : List[Any]= {} logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." ) if vision_config is None: lowercase__ : str= {} logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." 
) lowercase__ : str= PixaStructTextConfig(**snake_case__ ) lowercase__ : Dict= PixaStructVisionConfig(**snake_case__ ) lowercase__ : int= self.text_config.decoder_start_token_id lowercase__ : List[Any]= self.text_config.pad_token_id lowercase__ : Any= self.text_config.eos_token_id lowercase__ : Any= initializer_factor lowercase__ : int= initializer_range lowercase__ : List[str]= self.initializer_range lowercase__ : List[str]= self.initializer_range lowercase__ : Dict= is_vqa @classmethod def UpperCAmelCase_ ( cls , snake_case__ , snake_case__ , **snake_case__ ): '''simple docstring''' return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= copy.deepcopy(self.__dict__ ) lowercase__ : str= self.text_config.to_dict() lowercase__ : str= self.vision_config.to_dict() lowercase__ : List[str]= self.__class__.model_type return output
85
1
"""simple docstring""" def lowercase__(A , A ) ->Dict: """simple docstring""" lowercase__ : List[str]= [1] for i in range(2 , A ): factorials.append(factorials[-1] * i ) assert 0 <= k < factorials[-1] * n, "k out of bounds" lowercase__ : Any= [] lowercase__ : str= list(range(A ) ) # Find permutation while factorials: lowercase__ : List[str]= factorials.pop() lowercase__, lowercase__ : List[str]= divmod(A , A ) permutation.append(elements[number] ) elements.remove(elements[number] ) permutation.append(elements[0] ) return permutation if __name__ == "__main__": import doctest doctest.testmod()
85
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : str= TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" ) lowercase__ : str= AutoTokenizer.from_pretrained("google/mt5-small" ) lowercase__ : Tuple= tokenizer("Hello there" , return_tensors="tf" ).input_ids lowercase__ : Optional[Any]= tokenizer("Hi I am" , return_tensors="tf" ).input_ids lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ ).loss lowercase__ : int= -tf.math.reduce_mean(snake_case__ ).numpy() lowercase__ : int= -21.22_81_68 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
85
1
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
85
"""simple docstring""" from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = ["image_processor", "tokenizer"] __lowerCamelCase = "BridgeTowerImageProcessor" __lowerCamelCase = ("RobertaTokenizer", "RobertaTokenizerFast") def __init__( self , snake_case__ , snake_case__ ): '''simple docstring''' super().__init__(snake_case__ , snake_case__ ) def __call__( self , snake_case__ , snake_case__ = None , snake_case__ = True , snake_case__ = False , snake_case__ = None , snake_case__ = None , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = True , snake_case__ = None , **snake_case__ , ): '''simple docstring''' lowercase__ : Optional[int]= self.tokenizer( text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_token_type_ids=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , ) # add pixel_values + pixel_mask lowercase__ : Optional[int]= self.image_processor( snake_case__ , return_tensors=snake_case__ , do_normalize=snake_case__ , do_center_crop=snake_case__ , **snake_case__ ) encoding.update(snake_case__ ) return encoding def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ ) def UpperCAmelCase_ ( self , 
*snake_case__ , **snake_case__ ): '''simple docstring''' return self.tokenizer.decode(*snake_case__ , **snake_case__ ) @property def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.tokenizer.model_input_names lowercase__ : List[Any]= self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
85
1
"""simple docstring""" import argparse from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline if __name__ == "__main__": a : int = argparse.ArgumentParser() parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--txt2img_unclip""", default="""kakaobrain/karlo-v1-alpha""", type=str, required=False, help="""The pretrained txt2img unclip.""", ) a : List[str] = parser.parse_args() a : List[str] = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip) a : Optional[Any] = CLIPImageProcessor() a : List[str] = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""") a : Tuple = UnCLIPImageVariationPipeline( decoder=txtaimg.decoder, text_encoder=txtaimg.text_encoder, tokenizer=txtaimg.tokenizer, text_proj=txtaimg.text_proj, feature_extractor=feature_extractor, image_encoder=image_encoder, super_res_first=txtaimg.super_res_first, super_res_last=txtaimg.super_res_last, decoder_scheduler=txtaimg.decoder_scheduler, super_res_scheduler=txtaimg.super_res_scheduler, ) imgaimg.save_pretrained(args.dump_path)
85
"""simple docstring""" import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= tempfile.mkdtemp() lowercase__ : Optional[Any]= 8 # DPR tok lowercase__ : Tuple= [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] lowercase__ : Any= os.path.join(self.tmpdirname , "dpr_tokenizer" ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) lowercase__ : Any= os.path.join(snake_case__ , DPR_VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) # BART tok lowercase__ : List[Any]= [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] lowercase__ : Tuple= 
dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) lowercase__ : Any= ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] lowercase__ : Tuple= {"unk_token": "<unk>"} lowercase__ : int= os.path.join(self.tmpdirname , "bart_tokenizer" ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) lowercase__ : List[str]= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["vocab_file"] ) lowercase__ : str= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(snake_case__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(snake_case__ ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= self.get_dummy_dataset() lowercase__ : Optional[Any]= RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset: lowercase__ : Tuple= dataset 
lowercase__ : Optional[int]= RagRetriever( snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__ : Dict= self.get_dummy_dataset() lowercase__ : Tuple= RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , ) if from_disk: lowercase__ : Tuple= os.path.join(self.tmpdirname , "dataset" ) lowercase__ : Optional[Any]= os.path.join(self.tmpdirname , "index.faiss" ) dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) ) dataset.drop_index("embeddings" ) dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) ) del dataset lowercase__ : List[Any]= RagRetriever( snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: lowercase__ : Optional[int]= RagRetriever( snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , snake_case__ ) , ) return retriever def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT ) lowercase__ : Optional[int]= os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" ) dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" ) pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) ) lowercase__ : int= os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" ) lowercase__ : str= {sample["id"]: 
[sample["text"], sample["title"]] for sample in dataset} pickle.dump(snake_case__ , open(snake_case__ , "wb" ) ) lowercase__ : List[Any]= RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , ) lowercase__ : Optional[Any]= RagRetriever( snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= 1 lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever() lowercase__ : Union[str, Any]= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , n_docs=snake_case__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset: lowercase__ : Tuple= self.get_dummy_dataset() retriever.save_pretrained(snake_case__ ) lowercase__ : int= RagRetriever.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) lowercase__ : Any= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , 
dtype=np.floataa ) lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 ) self.assertTrue(out is not None ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[Any]= 1 lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) lowercase__ : Union[str, Any]= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__, lowercase__, lowercase__ : Any= retriever.retrieve(snake_case__ , n_docs=snake_case__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(snake_case__ ) lowercase__ : int= RagRetriever.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) lowercase__ : Tuple= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : str= retriever.retrieve(snake_case__ , n_docs=1 ) self.assertTrue(out is not None ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= 1 lowercase__ : str= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) lowercase__ : List[str]= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , 
n_docs=snake_case__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(snake_case__ ) lowercase__ : Optional[Any]= RagRetriever.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) lowercase__ : int= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : Union[str, Any]= retriever.retrieve(snake_case__ , n_docs=1 ) self.assertTrue(out is not None ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= 1 lowercase__ : int= self.get_dummy_legacy_index_retriever() lowercase__ : Optional[Any]= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__, lowercase__, lowercase__ : Optional[Any]= retriever.retrieve(snake_case__ , n_docs=snake_case__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] ) self.assertEqual(len(doc_dicts[0]["text"] ) , snake_case__ ) self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , 
[[1], [0]] ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(snake_case__ ) lowercase__ : List[Any]= RagRetriever.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) lowercase__ : str= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def UpperCAmelCase_ ( self ): '''simple docstring''' import torch lowercase__ : str= 1 lowercase__ : Union[str, Any]= self.get_dummy_canonical_hf_index_retriever() lowercase__ : str= [[5, 7], [10, 11]] lowercase__ : List[str]= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : Dict= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ ) lowercase__, lowercase__, lowercase__ : Optional[int]= ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(snake_case__ , snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) self.assertIsInstance(snake_case__ , np.ndarray ) lowercase__ : Any= retriever( snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ , return_tensors="pt" , ) lowercase__, lowercase__, lowercase__, lowercase__ : Tuple= ( # noqa: F841 out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], out["doc_ids"], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(snake_case__ , torch.Tensor ) self.assertIsInstance(snake_case__ , torch.Tensor 
) self.assertIsInstance(snake_case__ , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= self.get_dpr_ctx_encoder_tokenizer() lowercase__ : Dict= 1 lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) retriever.set_ctx_encoder_tokenizer(snake_case__ ) lowercase__ : List[str]= [[5, 7], [10, 11]] lowercase__ : Any= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : List[Any]= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ ) self.assertEqual( len(snake_case__ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , snake_case__ ) # check for doc token related keys in dictionary.
85
1
"""simple docstring""" from __future__ import annotations def lowercase__(A , A ) ->tuple[int, int]: """simple docstring""" if b == 0: return (1, 0) ((lowercase__), (lowercase__)) : Union[str, Any]= extended_euclid(A , a % b ) lowercase__ : int= a // b return (y, x - k * y) def lowercase__(A , A , A , A ) ->int: """simple docstring""" ((lowercase__), (lowercase__)) : int= extended_euclid(A , A ) lowercase__ : Optional[Any]= na * na lowercase__ : Any= ra * x * na + ra * y * na return (n % m + m) % m def lowercase__(A , A ) ->int: """simple docstring""" ((lowercase__), (lowercase__)) : Optional[int]= extended_euclid(A , A ) if b < 0: lowercase__ : int= (b % n + n) % n return b def lowercase__(A , A , A , A ) ->int: """simple docstring""" lowercase__, lowercase__ : List[str]= invert_modulo(A , A ), invert_modulo(A , A ) lowercase__ : List[str]= na * na lowercase__ : str= ra * x * na + ra * y * na return (n % m + m) % m if __name__ == "__main__": from doctest import testmod testmod(name="""chinese_remainder_theorem""", verbose=True) testmod(name="""chinese_remainder_theorem2""", verbose=True) testmod(name="""invert_modulo""", verbose=True) testmod(name="""extended_euclid""", verbose=True)
85
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = ["image_processor", "tokenizer"] __lowerCamelCase = "AutoImageProcessor" __lowerCamelCase = "AutoTokenizer" def __init__( self , snake_case__ , snake_case__ ): '''simple docstring''' super().__init__(snake_case__ , snake_case__ ) lowercase__ : List[Any]= self.image_processor def __call__( self , snake_case__=None , snake_case__=None , snake_case__=None , **snake_case__ ): '''simple docstring''' if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none." ) if text is not None: lowercase__ : Tuple= self.tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ ) if images is not None: lowercase__ : str= self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ ) if text is not None and images is not None: lowercase__ : Any= image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ ) def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ ) def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' return self.tokenizer.decode(*snake_case__ , **snake_case__ ) @property def UpperCAmelCase_ ( self ): '''simple docstring''' return ["input_ids", "attention_mask", "pixel_values"]
85
1
"""simple docstring""" def lowercase__(A , A ) ->int: """simple docstring""" lowercase__ : int= 1 # To kept the Calculated Value # Since C(n, k) = C(n, n-k) if k > (n - k): lowercase__ : List[str]= n - k # Calculate C(n,k) for i in range(A ): result *= n - i result //= i + 1 return result def lowercase__(A ) ->int: """simple docstring""" return binomial_coefficient(2 * node_count , A ) // (node_count + 1) def lowercase__(A ) ->int: """simple docstring""" if n < 0: raise ValueError("factorial() not defined for negative values" ) lowercase__ : Any= 1 for i in range(1 , n + 1 ): result *= i return result def lowercase__(A ) ->int: """simple docstring""" return catalan_number(A ) * factorial(A ) if __name__ == "__main__": a : Optional[int] = int(input("""Enter the number of nodes: """).strip() or 0) if node_count <= 0: raise ValueError("""We need some nodes to work with.""") print( F"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """ F"""binary trees and {catalan_number(node_count)} binary search trees.""" )
85
"""simple docstring""" a : List[Any] = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/""" def lowercase__(A ) ->bytes: """simple docstring""" if not isinstance(A , A ): lowercase__ : Union[str, Any]= f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(A ) lowercase__ : str= "".join(bin(A )[2:].zfill(8 ) for byte in data ) lowercase__ : Tuple= len(A ) % 6 != 0 if padding_needed: # The padding that will be added later lowercase__ : Union[str, Any]= b"=" * ((6 - len(A ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(A ) % 6) else: lowercase__ : str= b"" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(A ) , 6 ) ).encode() + padding ) def lowercase__(A ) ->bytes: """simple docstring""" if not isinstance(A , A ) and not isinstance(A , A ): lowercase__ : str= ( "argument should be a bytes-like object or ASCII string, " f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(A ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(A , A ): try: lowercase__ : Optional[Any]= encoded_data.decode("utf-8" ) except UnicodeDecodeError: raise ValueError("base64 encoded data should only contain ASCII characters" ) lowercase__ : List[Any]= encoded_data.count("=" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." 
# Check the padding assert len(A ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one lowercase__ : str= encoded_data[:-padding] lowercase__ : Tuple= "".join( bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: lowercase__ : Tuple= "".join( bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data ) lowercase__ : Any= [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(A ) , 8 ) ] return bytes(A ) if __name__ == "__main__": import doctest doctest.testmod()
85
1
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() a : str = logging.get_logger(__name__) a : Any = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """adapter_layer""": """encoder.layers.*.adapter_layer""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", """pooling_layer.linear""": """projector""", """pooling_layer.projection""": """classifier""", } a : Optional[int] = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """projector""", """classifier""", ] def lowercase__(A ) ->List[str]: """simple docstring""" lowercase__ : Optional[int]= {} with open(A , "r" ) as file: for line_number, line in enumerate(A ): lowercase__ : Optional[Any]= 
line.strip() if line: lowercase__ : Tuple= line.split() lowercase__ : Optional[int]= line_number lowercase__ : Optional[Any]= words[0] lowercase__ : Optional[int]= value return result def lowercase__(A , A , A , A , A ) ->int: """simple docstring""" for attribute in key.split("." ): lowercase__ : int= getattr(A , A ) lowercase__ : Optional[Any]= None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(A ): lowercase__ : Optional[int]= PARAM_MAPPING[full_name.split("." )[-1]] lowercase__ : List[Any]= "param" if weight_type is not None and weight_type != "param": lowercase__ : Union[str, Any]= getattr(A , A ).shape elif weight_type is not None and weight_type == "param": lowercase__ : Dict= hf_pointer for attribute in hf_param_name.split("." ): lowercase__ : Optional[int]= getattr(A , A ) lowercase__ : List[str]= shape_pointer.shape # let's reduce dimension lowercase__ : int= value[0] else: lowercase__ : List[str]= hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowercase__ : int= value elif weight_type == "weight_g": lowercase__ : Any= value elif weight_type == "weight_v": lowercase__ : List[str]= value elif weight_type == "bias": lowercase__ : str= value elif weight_type == "param": for attribute in hf_param_name.split("." ): lowercase__ : List[str]= getattr(A , A ) lowercase__ : Any= value else: lowercase__ : str= value logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def lowercase__(A , A , A , A , A ) ->Tuple: """simple docstring""" lowercase__ : int= None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(A ): lowercase__ : Union[str, Any]= PARAM_MAPPING[full_name.split("." 
)[-1]] lowercase__ : Dict= "param" if weight_type is not None and weight_type != "param": lowercase__ : Optional[int]= ".".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": lowercase__ : List[str]= ".".join([key, hf_param_name] ) else: lowercase__ : Union[str, Any]= key lowercase__ : Union[str, Any]= value if "lm_head" in full_key else value[0] a : int = { """W_a""": """linear_1.weight""", """W_b""": """linear_2.weight""", """b_a""": """linear_1.bias""", """b_b""": """linear_2.bias""", """ln_W""": """norm.weight""", """ln_b""": """norm.bias""", } def lowercase__(A , A , A=None , A=None ) ->Dict: """simple docstring""" lowercase__ : Optional[Any]= False for key, mapped_key in MAPPING.items(): lowercase__ : List[Any]= "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: lowercase__ : Any= True if "*" in mapped_key: lowercase__ : Optional[Any]= name.split(A )[0].split("." 
)[-2] lowercase__ : str= mapped_key.replace("*" , A ) if "weight_g" in name: lowercase__ : Optional[int]= "weight_g" elif "weight_v" in name: lowercase__ : Optional[int]= "weight_v" elif "bias" in name: lowercase__ : List[str]= "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj lowercase__ : Tuple= "weight" else: lowercase__ : str= None if hf_dict is not None: rename_dict(A , A , A , A , A ) else: set_recursively(A , A , A , A , A ) return is_used return is_used def lowercase__(A , A , A ) ->Union[str, Any]: """simple docstring""" lowercase__ : Dict= [] lowercase__ : Any= fairseq_model.state_dict() lowercase__ : int= hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): lowercase__ : List[str]= False if "conv_layers" in name: load_conv_layer( A , A , A , A , hf_model.config.feat_extract_norm == "group" , ) lowercase__ : int= True else: lowercase__ : Optional[int]= load_wavaveca_layer(A , A , A ) if not is_used: unused_weights.append(A ) logger.warning(f'''Unused weights: {unused_weights}''' ) def lowercase__(A , A , A , A , A ) ->Optional[int]: """simple docstring""" lowercase__ : Optional[int]= full_name.split("conv_layers." )[-1] lowercase__ : Any= name.split("." 
) lowercase__ : List[Any]= int(items[0] ) lowercase__ : List[str]= int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) lowercase__ : Tuple= value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) lowercase__ : int= value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) lowercase__ : Optional[int]= value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) lowercase__ : List[Any]= value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(A ) @torch.no_grad() def lowercase__(A , A , A=None , A=None , A=True , A=False ) ->Dict: """simple docstring""" if config_path is not None: lowercase__ : Any= WavaVecaConfig.from_pretrained(A ) else: lowercase__ : List[str]= 
WavaVecaConfig() if is_seq_class: lowercase__ : List[Any]= read_txt_into_dict(A ) lowercase__ : int= idalabel lowercase__ : Any= WavaVecaForSequenceClassification(A ) lowercase__ : Optional[Any]= WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=A , return_attention_mask=A , ) feature_extractor.save_pretrained(A ) elif is_finetuned: if dict_path: lowercase__ : List[str]= Dictionary.load(A ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowercase__ : Tuple= target_dict.pad_index lowercase__ : str= target_dict.bos_index lowercase__ : int= target_dict.eos_index lowercase__ : Any= len(target_dict.symbols ) lowercase__ : int= os.path.join(A , "vocab.json" ) if not os.path.isdir(A ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(A ) ) return os.makedirs(A , exist_ok=A ) lowercase__ : Optional[int]= target_dict.indices # fairseq has the <pad> and <s> switched lowercase__ : Union[str, Any]= 0 lowercase__ : Dict= 1 with open(A , "w" , encoding="utf-8" ) as vocab_handle: json.dump(A , A ) lowercase__ : Union[str, Any]= WavaVecaCTCTokenizer( A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=A , ) lowercase__ : List[str]= True if config.feat_extract_norm == "layer" else False lowercase__ : int= WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=A , return_attention_mask=A , ) lowercase__ : str= WavaVecaProcessor(feature_extractor=A , tokenizer=A ) processor.save_pretrained(A ) lowercase__ : int= WavaVecaForCTC(A ) else: lowercase__ : Union[str, Any]= WavaVecaForPreTraining(A ) if is_finetuned or is_seq_class: lowercase__, lowercase__, lowercase__ : List[Any]= fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" 
)[:-1] )} ) else: lowercase__ : List[Any]= argparse.Namespace(task="audio_pretraining" ) lowercase__ : Optional[int]= fairseq.tasks.setup_task(A ) lowercase__, lowercase__, lowercase__ : Tuple= fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=A ) lowercase__ : Optional[int]= model[0].eval() recursively_load_weights(A , A , not is_finetuned ) hf_wavavec.save_pretrained(A ) if __name__ == "__main__": a : List[Any] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) parser.add_argument( """--is_seq_class""", action="""store_true""", help="""Whether the model to convert is a fine-tuned sequence classification model or not""", ) a : List[str] = parser.parse_args() a : List[str] = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
85
"""simple docstring""" from __future__ import annotations def lowercase__(A ) ->list[int]: # This function is recursive """simple docstring""" lowercase__ : int= len(A ) # If the array contains only one element, we return it (it's the stop condition of # recursion) if array_length <= 1: return array # Else lowercase__ : str= array[0] lowercase__ : Optional[Any]= False lowercase__ : Any= 1 lowercase__ : list[int]= [] while not is_found and i < array_length: if array[i] < pivot: lowercase__ : Union[str, Any]= True lowercase__ : List[str]= [element for element in array[i:] if element >= array[i]] lowercase__ : Union[str, Any]= longest_subsequence(A ) if len(A ) > len(A ): lowercase__ : List[str]= temp_array else: i += 1 lowercase__ : List[str]= [element for element in array[1:] if element >= pivot] lowercase__ : List[str]= [pivot, *longest_subsequence(A )] if len(A ) > len(A ): return temp_array else: return longest_subseq if __name__ == "__main__": import doctest doctest.testmod()
85
1
"""simple docstring""" from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import BaseOutput, is_torch_available, is_transformers_available @dataclass class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = 42 __lowerCamelCase = 42 if is_transformers_available() and is_torch_available(): from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
85
"""simple docstring""" import argparse from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline if __name__ == "__main__": a : int = argparse.ArgumentParser() parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--txt2img_unclip""", default="""kakaobrain/karlo-v1-alpha""", type=str, required=False, help="""The pretrained txt2img unclip.""", ) a : List[str] = parser.parse_args() a : List[str] = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip) a : Optional[Any] = CLIPImageProcessor() a : List[str] = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""") a : Tuple = UnCLIPImageVariationPipeline( decoder=txtaimg.decoder, text_encoder=txtaimg.text_encoder, tokenizer=txtaimg.tokenizer, text_proj=txtaimg.text_proj, feature_extractor=feature_extractor, image_encoder=image_encoder, super_res_first=txtaimg.super_res_first, super_res_last=txtaimg.super_res_last, decoder_scheduler=txtaimg.decoder_scheduler, super_res_scheduler=txtaimg.super_res_scheduler, ) imgaimg.save_pretrained(args.dump_path)
85
1
"""simple docstring""" from typing import Dict, Iterable, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract a : Optional[Any] = logging.get_logger(__name__) def lowercase__(A , A , A ) ->Dict: """simple docstring""" return [ int(1_000 * (box[0] / width) ), int(1_000 * (box[1] / height) ), int(1_000 * (box[2] / width) ), int(1_000 * (box[3] / height) ), ] def lowercase__(A , A , A ) ->int: """simple docstring""" lowercase__ : Optional[Any]= to_pil_image(A ) lowercase__, lowercase__ : Optional[Any]= pil_image.size lowercase__ : Union[str, Any]= pytesseract.image_to_data(A , lang=A , output_type="dict" , config=A ) lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ : Any= data["text"], data["left"], data["top"], data["width"], data["height"] # filter empty words and corresponding coordinates lowercase__ : List[Any]= [idx for idx, word in enumerate(A ) if not word.strip()] lowercase__ : int= [word for idx, word in enumerate(A ) if idx not in irrelevant_indices] lowercase__ : Union[str, Any]= [coord for idx, coord in enumerate(A ) if idx not in irrelevant_indices] lowercase__ : Optional[Any]= [coord for idx, coord in enumerate(A ) if idx not in irrelevant_indices] lowercase__ : Union[str, Any]= [coord for idx, coord in enumerate(A ) if idx not in irrelevant_indices] lowercase__ : Optional[int]= [coord for idx, coord in enumerate(A ) if idx not in irrelevant_indices] # turn coordinates into (left, top, 
left+width, top+height) format lowercase__ : Dict= [] for x, y, w, h in zip(A , A , A , A ): lowercase__ : Union[str, Any]= [x, y, x + w, y + h] actual_boxes.append(A ) # finally, normalize the bounding boxes lowercase__ : str= [] for box in actual_boxes: normalized_boxes.append(normalize_box(A , A , A ) ) assert len(A ) == len(A ), "Not as many words as there are bounding boxes" return words, normalized_boxes class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = ["pixel_values"] def __init__( self , snake_case__ = True , snake_case__ = None , snake_case__ = PILImageResampling.BILINEAR , snake_case__ = True , snake_case__ = 1 / 255 , snake_case__ = True , snake_case__ = None , snake_case__ = None , snake_case__ = True , snake_case__ = None , snake_case__ = "" , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) lowercase__ : List[Any]= size if size is not None else {"height": 224, "width": 224} lowercase__ : Union[str, Any]= get_size_dict(snake_case__ ) lowercase__ : Union[str, Any]= do_resize lowercase__ : Union[str, Any]= size lowercase__ : Tuple= resample lowercase__ : Tuple= do_rescale lowercase__ : Any= rescale_value lowercase__ : Optional[Any]= do_normalize lowercase__ : str= image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowercase__ : Any= image_std if image_std is not None else IMAGENET_STANDARD_STD lowercase__ : int= apply_ocr lowercase__ : Tuple= ocr_lang lowercase__ : str= tesseract_config def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = PILImageResampling.BILINEAR , snake_case__ = None , **snake_case__ , ): '''simple docstring''' lowercase__ : Union[str, Any]= get_size_dict(snake_case__ ) if "height" not in size or "width" not in size: raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. 
Got {size.keys()}''' ) lowercase__ : Dict= (size["height"], size["width"]) return resize(snake_case__ , size=snake_case__ , resample=snake_case__ , data_format=snake_case__ , **snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ , ): '''simple docstring''' return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ , ): '''simple docstring''' return normalize(snake_case__ , mean=snake_case__ , std=snake_case__ , data_format=snake_case__ , **snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__=None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = ChannelDimension.FIRST , **snake_case__ , ): '''simple docstring''' lowercase__ : List[str]= do_resize if do_resize is not None else self.do_resize lowercase__ : Tuple= size if size is not None else self.size lowercase__ : List[str]= get_size_dict(snake_case__ ) lowercase__ : int= resample if resample is not None else self.resample lowercase__ : List[Any]= do_rescale if do_rescale is not None else self.do_rescale lowercase__ : Any= rescale_factor if rescale_factor is not None else self.rescale_factor lowercase__ : Dict= do_normalize if do_normalize is not None else self.do_normalize lowercase__ : str= image_mean if image_mean is not None else self.image_mean lowercase__ : List[str]= image_std if image_std is not None else self.image_std lowercase__ : Union[str, Any]= apply_ocr if apply_ocr is not None else self.apply_ocr lowercase__ : str= ocr_lang if ocr_lang is not None else self.ocr_lang lowercase__ : Dict= tesseract_config if tesseract_config is not None else self.tesseract_config lowercase__ : 
Tuple= make_list_of_images(snake_case__ ) if not valid_images(snake_case__ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("If do_normalize is True, image_mean and image_std must be specified." ) # All transformations expect numpy arrays. lowercase__ : Any= [to_numpy_array(snake_case__ ) for image in images] # Tesseract OCR to get words + normalized bounding boxes if apply_ocr: requires_backends(self , "pytesseract" ) lowercase__ : Union[str, Any]= [] lowercase__ : List[Any]= [] for image in images: lowercase__, lowercase__ : Any= apply_tesseract(snake_case__ , snake_case__ , snake_case__ ) words_batch.append(snake_case__ ) boxes_batch.append(snake_case__ ) if do_resize: lowercase__ : Dict= [self.resize(image=snake_case__ , size=snake_case__ , resample=snake_case__ ) for image in images] if do_rescale: lowercase__ : Union[str, Any]= [self.rescale(image=snake_case__ , scale=snake_case__ ) for image in images] if do_normalize: lowercase__ : List[Any]= [self.normalize(image=snake_case__ , mean=snake_case__ , std=snake_case__ ) for image in images] lowercase__ : Optional[Any]= [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images] lowercase__ : Dict= BatchFeature(data={"pixel_values": images} , tensor_type=snake_case__ ) if apply_ocr: lowercase__ : List[str]= words_batch lowercase__ : Optional[int]= boxes_batch return data
85
"""simple docstring""" import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, 
load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() a : Optional[Any] = { """bart""": ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), """bert""": ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-uncased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-cased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-base-cased-finetuned-mrpc""": ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """dpr""": ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), """gpt2""": ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlnet""": ( XLNetConfig, 
TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm""": ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm-roberta""": ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """transfo-xl""": ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """openai-gpt""": ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """roberta""": ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """layoutlm""": ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), """roberta-large-mnli""": ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """camembert""": ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """flaubert""": ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert""": ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert-base-distilled-squad""": ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert""": ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert-visual-feature-encoder""": ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """ctrl""": ( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """albert""": ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, 
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """t5""": ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """electra""": ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """wav2vec2""": ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def lowercase__(A , A , A , A , A=False , A=True ) ->Union[str, Any]: """simple docstring""" if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase__, lowercase__, lowercase__, lowercase__ : List[Any]= MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models ) lowercase__ : List[Any]= config_class.from_json_file(A ) lowercase__ : Any= True lowercase__ : List[str]= True print(f'''Building TensorFlow model from configuration: {config}''' ) lowercase__ : Optional[int]= model_class(A ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): lowercase__ : List[str]= cached_file( A , A , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: lowercase__ : Union[str, Any]= load_pytorch_checkpoint_in_tfa_model(A , A ) if compare_with_pt_model: lowercase__ : Any= tf_model(tf_model.dummy_inputs , training=A ) # build the network lowercase__ : Optional[Any]= torch.load(A , map_location="cpu" ) lowercase__ : Union[str, Any]= pt_model_class.from_pretrained( pretrained_model_name_or_path=A , config=A , state_dict=A ) with torch.no_grad(): lowercase__ : str= pt_model(**pt_model.dummy_inputs ) lowercase__ : Tuple= pto[0].numpy() lowercase__ : List[Any]= tfo[0].numpy() lowercase__ : Any= np.amax(np.abs(np_pt - np_tf ) ) print(f'''Max absolute difference between models outputs {diff}''' ) assert diff <= 2e-2, f'''Error, model absolute difference is 
>2e-2: {diff}''' # Save pytorch-model print(f'''Save TensorFlow model to {tf_dump_path}''' ) tf_model.save_weights(A , save_format="h5" ) def lowercase__(A , A , A=None , A=None , A=False , A=False , A=False , A=False , ) ->List[Any]: """simple docstring""" if args_model_type is None: lowercase__ : Tuple= list(MODEL_CLASSES.keys() ) else: lowercase__ : Optional[int]= [args_model_type] for j, model_type in enumerate(A , start=1 ): print("=" * 100 ) print(f''' Converting model type {j}/{len(A )}: {model_type}''' ) print("=" * 100 ) if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ : Optional[int]= MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: lowercase__ : int= list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: lowercase__ : Any= model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(A , A ) , start=1 ): print("-" * 100 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' ) continue lowercase__ : Any= model_shortcut_name elif only_convert_finetuned_models: print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''' ) continue print( f''' Converting checkpoint {i}/{len(A )}: {model_shortcut_name} - model_type {model_type}''' ) print("-" * 100 ) if config_shortcut_name in aws_config_map: lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models ) else: lowercase__ : Union[str, Any]= config_shortcut_name if model_shortcut_name in aws_model_maps: lowercase__ : str= cached_file(A , A , force_download=not use_cached_models ) else: lowercase__ : Any= model_shortcut_name if os.path.isfile(A ): lowercase__ : Dict= "converted_model" 
convert_pt_checkpoint_to_tf( model_type=A , pytorch_checkpoint_path=A , config_file=A , tf_dump_path=os.path.join(A , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=A , ) if remove_cached_files: os.remove(A ) os.remove(A ) if __name__ == "__main__": a : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file.""" ) parser.add_argument( """--model_type""", default=None, type=str, help=( F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """ """convert all the models from AWS.""" ), ) parser.add_argument( """--pytorch_checkpoint_path""", default=None, type=str, help=( """Path to the PyTorch checkpoint path or shortcut name to download from AWS. """ """If not given, will download and convert all the checkpoints from AWS.""" ), ) parser.add_argument( """--config_file""", default=None, type=str, help=( """The config json file corresponding to the pre-trained model. \n""" """This specifies the model architecture. 
If not given and """ """--pytorch_checkpoint_path is not given or is a shortcut name """ """use the configuration associated to the shortcut name on the AWS""" ), ) parser.add_argument( """--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions.""" ) parser.add_argument( """--use_cached_models""", action="""store_true""", help="""Use cached models if possible instead of updating to latest checkpoint versions.""", ) parser.add_argument( """--remove_cached_files""", action="""store_true""", help="""Remove pytorch models after conversion (save memory when converting in batches).""", ) parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""") a : List[str] = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
85
1
"""simple docstring""" from __future__ import annotations from random import random from typing import Generic, TypeVar a : str = TypeVar("""KT""") a : Optional[Any] = TypeVar("""VT""") class __UpperCAmelCase( Generic[KT, VT] ): """simple docstring""" def __init__( self , snake_case__ = "root" , snake_case__ = None ): '''simple docstring''' lowercase__ : str= key lowercase__ : Dict= value lowercase__ : list[Node[KT, VT]]= [] def __repr__( self ): '''simple docstring''' return F'''Node({self.key}: {self.value})''' @property def UpperCAmelCase_ ( self ): '''simple docstring''' return len(self.forward ) class __UpperCAmelCase( Generic[KT, VT] ): """simple docstring""" def __init__( self , snake_case__ = 0.5 , snake_case__ = 16 ): '''simple docstring''' lowercase__ : Node[KT, VT]= Node[KT, VT]() lowercase__ : Any= 0 lowercase__ : List[str]= p lowercase__ : Optional[int]= max_level def __str__( self ): '''simple docstring''' lowercase__ : List[str]= list(self ) if len(snake_case__ ) == 0: return F'''SkipList(level={self.level})''' lowercase__ : Any= max((len(str(snake_case__ ) ) for item in items) , default=4 ) lowercase__ : List[Any]= max(snake_case__ , 4 ) + 4 lowercase__ : Tuple= self.head lowercase__ : Optional[int]= [] lowercase__ : Optional[Any]= node.forward.copy() lines.append(F'''[{node.key}]'''.ljust(snake_case__ , "-" ) + "* " * len(snake_case__ ) ) lines.append(" " * label_size + "| " * len(snake_case__ ) ) while len(node.forward ) != 0: lowercase__ : Dict= node.forward[0] lines.append( F'''[{node.key}]'''.ljust(snake_case__ , "-" ) + " ".join(str(n.key ) if n.key == node.key else "|" for n in forwards ) ) lines.append(" " * label_size + "| " * len(snake_case__ ) ) lowercase__ : Union[str, Any]= node.forward lines.append("None".ljust(snake_case__ ) + "* " * len(snake_case__ ) ) return F'''SkipList(level={self.level})\n''' + "\n".join(snake_case__ ) def __iter__( self ): '''simple docstring''' lowercase__ : Tuple= self.head while len(node.forward ) != 0: 
yield node.forward[0].key lowercase__ : Dict= node.forward[0] def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= 1 while random() < self.p and level < self.max_level: level += 1 return level def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__ : Tuple= [] lowercase__ : int= self.head for i in reversed(range(self.level ) ): # i < node.level - When node level is lesser than `i` decrement `i`. # node.forward[i].key < key - Jumping to node with key value higher # or equal to searched key would result # in skipping searched key. while i < node.level and node.forward[i].key < key: lowercase__ : Optional[int]= node.forward[i] # Each leftmost node (relative to searched node) will potentially have to # be updated. update_vector.append(snake_case__ ) update_vector.reverse() # Note that we were inserting values in reverse order. # len(node.forward) != 0 - If current node doesn't contain any further # references then searched key is not present. # node.forward[0].key == key - Next node key should be equal to search key # if key is present. if len(node.forward ) != 0 and node.forward[0].key == key: return node.forward[0], update_vector else: return None, update_vector def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__, lowercase__ : Union[str, Any]= self._locate_node(snake_case__ ) if node is not None: for i, update_node in enumerate(snake_case__ ): # Remove or replace all references to removed node. 
if update_node.level > i and update_node.forward[i].key == key: if node.level > i: lowercase__ : Union[str, Any]= node.forward[i] else: lowercase__ : Optional[Any]= update_node.forward[:i] def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ): '''simple docstring''' lowercase__, lowercase__ : str= self._locate_node(snake_case__ ) if node is not None: lowercase__ : List[Any]= value else: lowercase__ : Any= self.random_level() if level > self.level: # After level increase we have to add additional nodes to head. for _ in range(self.level - 1 , snake_case__ ): update_vector.append(self.head ) lowercase__ : Optional[int]= level lowercase__ : Union[str, Any]= Node(snake_case__ , snake_case__ ) for i, update_node in enumerate(update_vector[:level] ): # Change references to pass through new node. if update_node.level > i: new_node.forward.append(update_node.forward[i] ) if update_node.level < i + 1: update_node.forward.append(snake_case__ ) else: lowercase__ : Tuple= new_node def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__, lowercase__ : Any= self._locate_node(snake_case__ ) if node is not None: return node.value return None def lowercase__() ->List[str]: """simple docstring""" lowercase__ : Dict= SkipList() skip_list.insert("Key1" , 3 ) skip_list.insert("Key2" , 12 ) skip_list.insert("Key3" , 41 ) skip_list.insert("Key4" , -19 ) lowercase__ : Tuple= skip_list.head lowercase__ : str= {} while node.level != 0: lowercase__ : List[str]= node.forward[0] lowercase__ : List[str]= node.value assert len(A ) == 4 assert all_values["Key1"] == 3 assert all_values["Key2"] == 12 assert all_values["Key3"] == 41 assert all_values["Key4"] == -19 def lowercase__() ->List[Any]: """simple docstring""" lowercase__ : Optional[int]= SkipList() skip_list.insert("Key1" , 10 ) skip_list.insert("Key1" , 12 ) skip_list.insert("Key5" , 7 ) skip_list.insert("Key7" , 10 ) skip_list.insert("Key10" , 5 ) skip_list.insert("Key7" , 7 ) skip_list.insert("Key5" , 5 ) 
skip_list.insert("Key10" , 10 ) lowercase__ : List[Any]= skip_list.head lowercase__ : str= {} while node.level != 0: lowercase__ : Union[str, Any]= node.forward[0] lowercase__ : List[str]= node.value if len(A ) != 4: print() assert len(A ) == 4 assert all_values["Key1"] == 12 assert all_values["Key7"] == 7 assert all_values["Key5"] == 5 assert all_values["Key10"] == 10 def lowercase__() ->Optional[int]: """simple docstring""" lowercase__ : List[Any]= SkipList() assert skip_list.find("Some key" ) is None def lowercase__() ->Optional[int]: """simple docstring""" lowercase__ : Optional[int]= SkipList() skip_list.insert("Key2" , 20 ) assert skip_list.find("Key2" ) == 20 skip_list.insert("Some Key" , 10 ) skip_list.insert("Key2" , 8 ) skip_list.insert("V" , 13 ) assert skip_list.find("Y" ) is None assert skip_list.find("Key2" ) == 8 assert skip_list.find("Some Key" ) == 10 assert skip_list.find("V" ) == 13 def lowercase__() ->int: """simple docstring""" lowercase__ : Optional[int]= SkipList() skip_list.delete("Some key" ) assert len(skip_list.head.forward ) == 0 def lowercase__() ->Tuple: """simple docstring""" lowercase__ : List[str]= SkipList() skip_list.insert("Key1" , 12 ) skip_list.insert("V" , 13 ) skip_list.insert("X" , 14 ) skip_list.insert("Key2" , 15 ) skip_list.delete("V" ) skip_list.delete("Key2" ) assert skip_list.find("V" ) is None assert skip_list.find("Key2" ) is None def lowercase__() ->str: """simple docstring""" lowercase__ : List[str]= SkipList() skip_list.insert("Key1" , 12 ) skip_list.insert("V" , 13 ) skip_list.insert("X" , 14 ) skip_list.insert("Key2" , 15 ) skip_list.delete("V" ) assert skip_list.find("V" ) is None assert skip_list.find("X" ) == 14 assert skip_list.find("Key1" ) == 12 assert skip_list.find("Key2" ) == 15 skip_list.delete("X" ) assert skip_list.find("V" ) is None assert skip_list.find("X" ) is None assert skip_list.find("Key1" ) == 12 assert skip_list.find("Key2" ) == 15 skip_list.delete("Key1" ) assert skip_list.find("V" ) is 
None assert skip_list.find("X" ) is None assert skip_list.find("Key1" ) is None assert skip_list.find("Key2" ) == 15 skip_list.delete("Key2" ) assert skip_list.find("V" ) is None assert skip_list.find("X" ) is None assert skip_list.find("Key1" ) is None assert skip_list.find("Key2" ) is None def lowercase__() ->Union[str, Any]: """simple docstring""" lowercase__ : List[Any]= SkipList() skip_list.insert("Key1" , 12 ) skip_list.insert("V" , 13 ) skip_list.insert("X" , 142 ) skip_list.insert("Key2" , 15 ) skip_list.delete("X" ) def traverse_keys(A ): yield node.key for forward_node in node.forward: yield from traverse_keys(A ) assert len(set(traverse_keys(skip_list.head ) ) ) == 4 def lowercase__() ->Dict: """simple docstring""" def is_sorted(A ): return all(next_item >= item for item, next_item in zip(A , lst[1:] ) ) lowercase__ : List[str]= SkipList() for i in range(10 ): skip_list.insert(A , A ) assert is_sorted(list(A ) ) skip_list.delete(5 ) skip_list.delete(8 ) skip_list.delete(2 ) assert is_sorted(list(A ) ) skip_list.insert(-12 , -12 ) skip_list.insert(77 , 77 ) assert is_sorted(list(A ) ) def lowercase__() ->Optional[int]: """simple docstring""" for _ in range(100 ): # Repeat test 100 times due to the probabilistic nature of skip list # random values == random bugs test_insert() test_insert_overrides_existing_value() test_searching_empty_list_returns_none() test_search() test_deleting_item_from_empty_list_do_nothing() test_deleted_items_are_not_founded_by_find_method() test_delete_removes_only_given_key() test_delete_doesnt_leave_dead_nodes() test_iter_always_yields_sorted_values() def lowercase__() ->int: """simple docstring""" lowercase__ : Dict= SkipList() skip_list.insert(2 , "2" ) skip_list.insert(4 , "4" ) skip_list.insert(6 , "4" ) skip_list.insert(4 , "5" ) skip_list.insert(8 , "4" ) skip_list.insert(9 , "4" ) skip_list.delete(4 ) print(A ) if __name__ == "__main__": import doctest doctest.testmod() main()
85
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule a : List[str] = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]} if TYPE_CHECKING: from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM else: import sys a : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
85
1
"""simple docstring""" from argparse import ArgumentParser from .add_new_model import AddNewModelCommand from .add_new_model_like import AddNewModelLikeCommand from .convert import ConvertCommand from .download import DownloadCommand from .env import EnvironmentCommand from .lfs import LfsCommands from .pt_to_tf import PTtoTFCommand from .run import RunCommand from .serving import ServeCommand from .user import UserCommands def lowercase__() ->Optional[Any]: """simple docstring""" lowercase__ : List[Any]= ArgumentParser("Transformers CLI tool" , usage="transformers-cli <command> [<args>]" ) lowercase__ : Any= parser.add_subparsers(help="transformers-cli command helpers" ) # Register commands ConvertCommand.register_subcommand(A ) DownloadCommand.register_subcommand(A ) EnvironmentCommand.register_subcommand(A ) RunCommand.register_subcommand(A ) ServeCommand.register_subcommand(A ) UserCommands.register_subcommand(A ) AddNewModelCommand.register_subcommand(A ) AddNewModelLikeCommand.register_subcommand(A ) LfsCommands.register_subcommand(A ) PTtoTFCommand.register_subcommand(A ) # Let's go lowercase__ : Union[str, Any]= parser.parse_args() if not hasattr(A , "func" ): parser.print_help() exit(1 ) # Run lowercase__ : Dict= args.func(A ) service.run() if __name__ == "__main__": main()
85
"""simple docstring""" def lowercase__(A ) ->list: """simple docstring""" if n_term == "": return [] lowercase__ : list= [] for temp in range(int(A ) ): series.append(f'''1/{temp + 1}''' if series else "1" ) return series if __name__ == "__main__": a : Dict = input("""Enter the last number (nth term) of the Harmonic Series""") print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""") print(harmonic_series(nth_term))
85
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) a : int = { """configuration_layoutlmv3""": [ """LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv3Config""", """LayoutLMv3OnnxConfig""", ], """processing_layoutlmv3""": ["""LayoutLMv3Processor"""], """tokenization_layoutlmv3""": ["""LayoutLMv3Tokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Tuple = ["""LayoutLMv3TokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Any = [ """LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""", """LayoutLMv3ForQuestionAnswering""", """LayoutLMv3ForSequenceClassification""", """LayoutLMv3ForTokenClassification""", """LayoutLMv3Model""", """LayoutLMv3PreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Dict = [ """TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFLayoutLMv3ForQuestionAnswering""", """TFLayoutLMv3ForSequenceClassification""", """TFLayoutLMv3ForTokenClassification""", """TFLayoutLMv3Model""", """TFLayoutLMv3PreTrainedModel""", ] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Tuple = ["""LayoutLMv3FeatureExtractor"""] a : int = ["""LayoutLMv3ImageProcessor"""] if TYPE_CHECKING: from .configuration_layoutlmva import ( LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig, LayoutLMvaOnnxConfig, ) from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from 
.tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_layoutlmva import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, TFLayoutLMvaPreTrainedModel, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor from .image_processing_layoutlmva import LayoutLMvaImageProcessor else: import sys a : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
85
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : int = logging.get_logger(__name__) a : str = { """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""", """google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""", """google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""", # See all BigBird models at https://huggingface.co/models?filter=big_bird } class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = "big_bird" def __init__( self , snake_case__=50358 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu_new" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=4096 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=True , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=66 , snake_case__="block_sparse" , snake_case__=True , snake_case__=False , snake_case__=64 , snake_case__=3 , snake_case__=None , **snake_case__ , ): '''simple docstring''' super().__init__( pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , sep_token_id=snake_case__ , **snake_case__ , ) lowercase__ : Dict= vocab_size lowercase__ : Optional[int]= max_position_embeddings lowercase__ : List[Any]= hidden_size lowercase__ : List[str]= num_hidden_layers lowercase__ : List[str]= num_attention_heads lowercase__ : Optional[int]= intermediate_size lowercase__ : Optional[int]= hidden_act lowercase__ : Tuple= hidden_dropout_prob lowercase__ : int= attention_probs_dropout_prob lowercase__ : int= initializer_range lowercase__ : List[Any]= type_vocab_size lowercase__ : Union[str, Any]= layer_norm_eps lowercase__ : Optional[Any]= 
use_cache lowercase__ : Union[str, Any]= rescale_embeddings lowercase__ : Union[str, Any]= attention_type lowercase__ : Any= use_bias lowercase__ : List[Any]= block_size lowercase__ : Optional[Any]= num_random_blocks lowercase__ : Optional[int]= classifier_dropout class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" @property def UpperCAmelCase_ ( self ): '''simple docstring''' if self.task == "multiple-choice": lowercase__ : List[Any]= {0: "batch", 1: "choice", 2: "sequence"} else: lowercase__ : Tuple= {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
85
1
"""simple docstring""" import os def lowercase__(A = "input.txt" ) ->int: """simple docstring""" with open(os.path.join(os.path.dirname(A ) , A ) ) as input_file: lowercase__ : Optional[Any]= [ [int(A ) for element in line.split("," )] for line in input_file.readlines() ] lowercase__ : Optional[Any]= len(A ) lowercase__ : Dict= len(matrix[0] ) lowercase__ : Any= [[-1 for _ in range(A )] for _ in range(A )] for i in range(A ): lowercase__ : Any= matrix[i][0] for j in range(1 , A ): for i in range(A ): lowercase__ : List[str]= minimal_path_sums[i][j - 1] + matrix[i][j] for i in range(1 , A ): lowercase__ : str= min( minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] ) for i in range(rows - 2 , -1 , -1 ): lowercase__ : int= min( minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] ) return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums ) if __name__ == "__main__": print(F"""{solution() = }""")
85
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
85
1
"""simple docstring""" import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu a : Dict = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json""" with io.open(filename, """r""", encoding="""utf-8""") as f: a : str = json.load(f) @require_torch class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' return FSMTTokenizer.from_pretrained(snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__ : List[Any]= FSMTForConditionalGeneration.from_pretrained(snake_case__ ).to(snake_case__ ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ["en-ru", 26.0], ["ru-en", 22.0], ["en-de", 22.0], ["de-en", 29.0], ] ) @slow def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ): '''simple docstring''' # note: this test is not testing the best performance since it only evals a small batch # but it should be enough to detect a regression in the output quality lowercase__ : Tuple= F'''facebook/wmt19-{pair}''' lowercase__ : int= self.get_tokenizer(snake_case__ ) lowercase__ : Optional[int]= self.get_model(snake_case__ ) lowercase__ : Tuple= bleu_data[pair]["src"] lowercase__ : List[str]= bleu_data[pair]["tgt"] lowercase__ : List[Any]= tokenizer(snake_case__ , return_tensors="pt" , truncation=snake_case__ , padding="longest" ).to(snake_case__ ) lowercase__ : Any= model.generate( input_ids=batch.input_ids , num_beams=8 , ) lowercase__ : Union[str, Any]= tokenizer.batch_decode( snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ ) lowercase__ : Optional[Any]= calculate_bleu(snake_case__ , snake_case__ ) print(snake_case__ ) self.assertGreaterEqual(scores["bleu"] , snake_case__ )
85
"""simple docstring""" from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def lowercase__(A , A ) ->List[Any]: """simple docstring""" lowercase__ : str= [] for part_id in partition_order: lowercase__ : int= df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect() for row_idx, row in enumerate(A ): expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->str: """simple docstring""" lowercase__ : Optional[Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Tuple= spark.range(100 ).repartition(1 ) lowercase__ : Dict= Spark(A ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=16 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 50 @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->Tuple: """simple docstring""" lowercase__ : Union[str, Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Dict= spark.range(10 ).repartition(2 ) lowercase__ : Optional[Any]= [1, 0] lowercase__ : List[str]= _generate_iterable_examples(A , A ) # Reverse the partitions. 
lowercase__ : int= _get_expected_row_ids_and_row_dicts_for_partition_order(A , A ) for i, (row_id, row_dict) in enumerate(generate_fn() ): lowercase__, lowercase__ : Any= expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->int: """simple docstring""" lowercase__ : int= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Dict= spark.range(10 ).repartition(1 ) lowercase__ : str= SparkExamplesIterable(A ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(A ): assert row_id == f'''0_{i}''' assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->str: """simple docstring""" lowercase__ : List[str]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : int= spark.range(30 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. 
with patch("numpy.random.Generator" ) as generator_mock: lowercase__ : Optional[Any]= lambda A : x.reverse() lowercase__ : Tuple= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [2, 1, 0] ) lowercase__ : List[str]= SparkExamplesIterable(A ).shuffle_data_sources(A ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(A ): lowercase__, lowercase__ : str= expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->Any: """simple docstring""" lowercase__ : Dict= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Union[str, Any]= spark.range(20 ).repartition(4 ) # Partitions 0 and 2 lowercase__ : Optional[int]= SparkExamplesIterable(A ).shard_data_sources(worker_id=0 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowercase__ : Union[str, Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [0, 2] ) for i, (row_id, row_dict) in enumerate(A ): lowercase__, lowercase__ : Tuple= expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 lowercase__ : Tuple= SparkExamplesIterable(A ).shard_data_sources(worker_id=1 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowercase__ : List[Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [1, 3] ) for i, (row_id, row_dict) in enumerate(A ): lowercase__, lowercase__ : Dict= expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->Tuple: """simple docstring""" lowercase__ : Any= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Tuple= spark.range(100 ).repartition(1 ) lowercase__ : Optional[int]= Spark(A ) # Choose a small max_shard_size for maximum partitioning. 
spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 100
85
1
"""simple docstring""" from __future__ import annotations import numpy as np from numpy import floataa from numpy.typing import NDArray def lowercase__(A , A , A , A , ) ->list[float]: """simple docstring""" lowercase__, lowercase__ : List[Any]= coefficient_matrix.shape lowercase__, lowercase__ : Any= constant_matrix.shape if rowsa != colsa: lowercase__ : int= f'''Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}''' raise ValueError(A ) if colsa != 1: lowercase__ : Tuple= f'''Constant matrix must be nx1 but received {rowsa}x{colsa}''' raise ValueError(A ) if rowsa != rowsa: lowercase__ : Tuple= ( "Coefficient and constant matrices dimensions must be nxn and nx1 but " f'''received {rowsa}x{colsa} and {rowsa}x{colsa}''' ) raise ValueError(A ) if len(A ) != rowsa: lowercase__ : List[str]= ( "Number of initial values must be equal to number of rows in coefficient " f'''matrix but received {len(A )} and {rowsa}''' ) raise ValueError(A ) if iterations <= 0: raise ValueError("Iterations must be at least 1" ) lowercase__ : NDArray[floataa]= np.concatenate( (coefficient_matrix, constant_matrix) , axis=1 ) lowercase__, lowercase__ : Tuple= table.shape strictly_diagonally_dominant(A ) # Iterates the whole matrix for given number of times for _ in range(A ): lowercase__ : str= [] for row in range(A ): lowercase__ : Optional[int]= 0 for col in range(A ): if col == row: lowercase__ : Optional[Any]= table[row][col] elif col == cols - 1: lowercase__ : Optional[Any]= table[row][col] else: temp += (-1) * table[row][col] * init_val[col] lowercase__ : List[str]= (temp + val) / denom new_val.append(A ) lowercase__ : Optional[Any]= new_val return [float(A ) for i in new_val] def lowercase__(A ) ->bool: """simple docstring""" lowercase__, lowercase__ : Optional[int]= table.shape lowercase__ : Optional[Any]= True for i in range(0 , A ): lowercase__ : List[Any]= 0 for j in range(0 , cols - 1 ): if i == j: continue else: total += table[i][j] if table[i][i] <= total: 
raise ValueError("Coefficient matrix is not strictly diagonally dominant" ) return is_diagonally_dominant # Test Cases if __name__ == "__main__": import doctest doctest.testmod()
85
"""simple docstring""" import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=2 , snake_case__=99 , snake_case__=0 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=12 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__="last" , snake_case__=None , snake_case__=None , ): '''simple docstring''' lowercase__ : Optional[int]= parent lowercase__ : Tuple= batch_size lowercase__ : Tuple= seq_length lowercase__ : str= is_training lowercase__ : str= use_input_lengths lowercase__ : Any= use_token_type_ids lowercase__ : List[Any]= use_labels lowercase__ : Optional[int]= gelu_activation lowercase__ : str= sinusoidal_embeddings lowercase__ : List[str]= causal lowercase__ : Any= asm lowercase__ : Optional[int]= n_langs lowercase__ : Union[str, Any]= vocab_size lowercase__ : int= n_special lowercase__ : Any= hidden_size lowercase__ : int= num_hidden_layers lowercase__ : 
List[str]= num_attention_heads lowercase__ : List[str]= hidden_dropout_prob lowercase__ : str= attention_probs_dropout_prob lowercase__ : Any= max_position_embeddings lowercase__ : List[Any]= type_vocab_size lowercase__ : int= type_sequence_label_size lowercase__ : Any= initializer_range lowercase__ : Optional[int]= num_labels lowercase__ : Union[str, Any]= num_choices lowercase__ : List[Any]= summary_type lowercase__ : Optional[int]= use_proj lowercase__ : int= scope def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : Dict= random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : Tuple= None if self.use_input_lengths: lowercase__ : List[Any]= ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowercase__ : Tuple= None if self.use_token_type_ids: lowercase__ : Any= ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowercase__ : str= None lowercase__ : Tuple= None lowercase__ : Dict= None if self.use_labels: lowercase__ : Optional[Any]= ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : Optional[Any]= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase__ : Tuple= ids_tensor([self.batch_size] , 2 ).float() lowercase__ : Tuple= ids_tensor([self.batch_size] , self.num_choices ) lowercase__ : List[Any]= self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def UpperCAmelCase_ ( self ): '''simple docstring''' return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , 
sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : Any= FlaubertModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : str= model(snake_case__ , lengths=snake_case__ , langs=snake_case__ ) lowercase__ : str= model(snake_case__ , langs=snake_case__ ) lowercase__ : Any= model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : str= FlaubertWithLMHeadModel(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Optional[Any]= model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : str= FlaubertForQuestionAnsweringSimple(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : List[str]= model(snake_case__ ) lowercase__ : Dict= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) 
) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : List[Any]= FlaubertForQuestionAnswering(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Dict= model(snake_case__ ) lowercase__ : Any= model( snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , p_mask=snake_case__ , ) lowercase__ : List[str]= model( snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , ) ((lowercase__), ) : Optional[Any]= result_with_labels.to_tuple() lowercase__ : Union[str, Any]= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ ) ((lowercase__), ) : List[Any]= result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : List[str]= FlaubertForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Optional[Any]= model(snake_case__ ) lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.loss.shape , () ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : List[Any]= self.num_labels lowercase__ : Union[str, Any]= FlaubertForTokenClassification(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : int= model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : int= self.num_choices lowercase__ : str= FlaubertForMultipleChoice(config=snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Dict= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : int= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : str= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : Any= model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.prepare_config_and_inputs() ( ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ) : Any= config_and_inputs lowercase__ : Tuple= { "input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths, "attention_mask": input_mask, } return config, inputs_dict @require_torch class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" __lowerCamelCase = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) __lowerCamelCase = ( { "feature-extraction": FlaubertModel, "fill-mask": FlaubertWithLMHeadModel, "question-answering": FlaubertForQuestionAnsweringSimple, "text-classification": FlaubertForSequenceClassification, "token-classification": FlaubertForTokenClassification, "zero-shot": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' lowercase__ : Tuple= super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": lowercase__ : List[Any]= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) lowercase__ : List[str]= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) return inputs_dict def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= FlaubertModelTester(self ) lowercase__ : List[str]= ConfigTester(self , config_class=snake_case__ , emb_dim=37 ) def UpperCAmelCase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ ) def UpperCAmelCase_ ( self ): 
'''simple docstring''' lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*snake_case__ ) @slow def UpperCAmelCase_ ( self ): '''simple docstring''' for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : List[str]= FlaubertModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) @slow @require_torch_gpu def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__, lowercase__ : Optional[Any]= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. if model_class == FlaubertForMultipleChoice: return lowercase__ : int= True lowercase__ : List[Any]= model_class(config=snake_case__ ) lowercase__ : str= self._prepare_for_class(snake_case__ , snake_case__ ) lowercase__ : Dict= torch.jit.trace( snake_case__ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(snake_case__ , os.path.join(snake_case__ , "traced_model.pt" ) ) lowercase__ : str= torch.jit.load(os.path.join(snake_case__ , "traced_model.pt" ) , map_location=snake_case__ ) loaded(inputs_dict["input_ids"].to(snake_case__ ) , inputs_dict["attention_mask"].to(snake_case__ ) ) @require_torch class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" ) lowercase__ : Tuple= torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) with torch.no_grad(): lowercase__ : Optional[int]= model(snake_case__ )[0] lowercase__ : Optional[int]= torch.Size((1, 11, 768) 
) self.assertEqual(output.shape , snake_case__ ) lowercase__ : Dict= torch.tensor( [[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
85
1
"""simple docstring""" from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = 42 class __UpperCAmelCase( nn.Module ): """simple docstring""" def __init__( self , snake_case__=3 , snake_case__=3 , snake_case__=("DownEncoderBlock2D",) , snake_case__=(64,) , snake_case__=2 , snake_case__=32 , snake_case__="silu" , snake_case__=True , ): '''simple docstring''' super().__init__() lowercase__ : int= layers_per_block lowercase__ : Optional[int]= torch.nn.Convad( snake_case__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , ) lowercase__ : Dict= None lowercase__ : Tuple= nn.ModuleList([] ) # down lowercase__ : Tuple= block_out_channels[0] for i, down_block_type in enumerate(snake_case__ ): lowercase__ : Tuple= output_channel lowercase__ : int= block_out_channels[i] lowercase__ : str= i == len(snake_case__ ) - 1 lowercase__ : Optional[int]= get_down_block( snake_case__ , num_layers=self.layers_per_block , in_channels=snake_case__ , out_channels=snake_case__ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=snake_case__ , resnet_groups=snake_case__ , attention_head_dim=snake_case__ , temb_channels=snake_case__ , ) self.down_blocks.append(snake_case__ ) # mid lowercase__ : Optional[int]= UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=snake_case__ , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=snake_case__ , temb_channels=snake_case__ , ) # out lowercase__ : str= nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=snake_case__ , eps=1e-6 ) lowercase__ : 
Optional[Any]= nn.SiLU() lowercase__ : Optional[Any]= 2 * out_channels if double_z else out_channels lowercase__ : List[Any]= nn.Convad(block_out_channels[-1] , snake_case__ , 3 , padding=1 ) lowercase__ : List[str]= False def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__ : Optional[int]= x lowercase__ : Union[str, Any]= self.conv_in(snake_case__ ) if self.training and self.gradient_checkpointing: def create_custom_forward(snake_case__ ): def custom_forward(*snake_case__ ): return module(*snake_case__ ) return custom_forward # down if is_torch_version(">=" , "1.11.0" ): for down_block in self.down_blocks: lowercase__ : Tuple= torch.utils.checkpoint.checkpoint( create_custom_forward(snake_case__ ) , snake_case__ , use_reentrant=snake_case__ ) # middle lowercase__ : Optional[int]= torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , snake_case__ , use_reentrant=snake_case__ ) else: for down_block in self.down_blocks: lowercase__ : Dict= torch.utils.checkpoint.checkpoint(create_custom_forward(snake_case__ ) , snake_case__ ) # middle lowercase__ : Union[str, Any]= torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , snake_case__ ) else: # down for down_block in self.down_blocks: lowercase__ : List[Any]= down_block(snake_case__ ) # middle lowercase__ : List[Any]= self.mid_block(snake_case__ ) # post-process lowercase__ : Any= self.conv_norm_out(snake_case__ ) lowercase__ : Dict= self.conv_act(snake_case__ ) lowercase__ : Tuple= self.conv_out(snake_case__ ) return sample class __UpperCAmelCase( nn.Module ): """simple docstring""" def __init__( self , snake_case__=3 , snake_case__=3 , snake_case__=("UpDecoderBlock2D",) , snake_case__=(64,) , snake_case__=2 , snake_case__=32 , snake_case__="silu" , snake_case__="group" , ): '''simple docstring''' super().__init__() lowercase__ : Dict= layers_per_block lowercase__ : List[Any]= nn.Convad( snake_case__ , block_out_channels[-1] , kernel_size=3 , 
stride=1 , padding=1 , ) lowercase__ : int= None lowercase__ : Dict= nn.ModuleList([] ) lowercase__ : List[str]= in_channels if norm_type == "spatial" else None # mid lowercase__ : List[str]= UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=snake_case__ , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=snake_case__ , temb_channels=snake_case__ , ) # up lowercase__ : Optional[Any]= list(reversed(snake_case__ ) ) lowercase__ : List[Any]= reversed_block_out_channels[0] for i, up_block_type in enumerate(snake_case__ ): lowercase__ : List[Any]= output_channel lowercase__ : Any= reversed_block_out_channels[i] lowercase__ : Dict= i == len(snake_case__ ) - 1 lowercase__ : Any= get_up_block( snake_case__ , num_layers=self.layers_per_block + 1 , in_channels=snake_case__ , out_channels=snake_case__ , prev_output_channel=snake_case__ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=snake_case__ , resnet_groups=snake_case__ , attention_head_dim=snake_case__ , temb_channels=snake_case__ , resnet_time_scale_shift=snake_case__ , ) self.up_blocks.append(snake_case__ ) lowercase__ : Any= output_channel # out if norm_type == "spatial": lowercase__ : List[str]= SpatialNorm(block_out_channels[0] , snake_case__ ) else: lowercase__ : Any= nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=snake_case__ , eps=1e-6 ) lowercase__ : List[Any]= nn.SiLU() lowercase__ : List[Any]= nn.Convad(block_out_channels[0] , snake_case__ , 3 , padding=1 ) lowercase__ : Optional[int]= False def UpperCAmelCase_ ( self , snake_case__ , snake_case__=None ): '''simple docstring''' lowercase__ : Tuple= z lowercase__ : Tuple= self.conv_in(snake_case__ ) lowercase__ : Any= next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(snake_case__ ): def custom_forward(*snake_case__ 
): return module(*snake_case__ ) return custom_forward if is_torch_version(">=" , "1.11.0" ): # middle lowercase__ : Union[str, Any]= torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , snake_case__ , snake_case__ , use_reentrant=snake_case__ ) lowercase__ : Tuple= sample.to(snake_case__ ) # up for up_block in self.up_blocks: lowercase__ : Tuple= torch.utils.checkpoint.checkpoint( create_custom_forward(snake_case__ ) , snake_case__ , snake_case__ , use_reentrant=snake_case__ ) else: # middle lowercase__ : List[str]= torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , snake_case__ , snake_case__ ) lowercase__ : Any= sample.to(snake_case__ ) # up for up_block in self.up_blocks: lowercase__ : str= torch.utils.checkpoint.checkpoint(create_custom_forward(snake_case__ ) , snake_case__ , snake_case__ ) else: # middle lowercase__ : str= self.mid_block(snake_case__ , snake_case__ ) lowercase__ : Tuple= sample.to(snake_case__ ) # up for up_block in self.up_blocks: lowercase__ : int= up_block(snake_case__ , snake_case__ ) # post-process if latent_embeds is None: lowercase__ : List[str]= self.conv_norm_out(snake_case__ ) else: lowercase__ : Tuple= self.conv_norm_out(snake_case__ , snake_case__ ) lowercase__ : Optional[Any]= self.conv_act(snake_case__ ) lowercase__ : str= self.conv_out(snake_case__ ) return sample class __UpperCAmelCase( nn.Module ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__="random" , snake_case__=False , snake_case__=True ): '''simple docstring''' super().__init__() lowercase__ : List[Any]= n_e lowercase__ : Dict= vq_embed_dim lowercase__ : int= beta lowercase__ : Union[str, Any]= legacy lowercase__ : str= nn.Embedding(self.n_e , self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e ) lowercase__ : Union[str, Any]= remap if self.remap is not None: self.register_buffer("used" , 
torch.tensor(np.load(self.remap ) ) ) lowercase__ : Optional[Any]= self.used.shape[0] lowercase__ : Optional[int]= unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": lowercase__ : Dict= self.re_embed lowercase__ : str= self.re_embed + 1 print( F'''Remapping {self.n_e} indices to {self.re_embed} indices. ''' F'''Using {self.unknown_index} for unknown indices.''' ) else: lowercase__ : Tuple= n_e lowercase__ : Optional[int]= sane_index_shape def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__ : str= inds.shape assert len(snake_case__ ) > 1 lowercase__ : Union[str, Any]= inds.reshape(ishape[0] , -1 ) lowercase__ : str= self.used.to(snake_case__ ) lowercase__ : str= (inds[:, :, None] == used[None, None, ...]).long() lowercase__ : Any= match.argmax(-1 ) lowercase__ : int= match.sum(2 ) < 1 if self.unknown_index == "random": lowercase__ : int= torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device ) else: lowercase__ : Optional[int]= self.unknown_index return new.reshape(snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__ : Dict= inds.shape assert len(snake_case__ ) > 1 lowercase__ : str= inds.reshape(ishape[0] , -1 ) lowercase__ : Dict= self.used.to(snake_case__ ) if self.re_embed > self.used.shape[0]: # extra token lowercase__ : Dict= 0 # simply set to zero lowercase__ : Dict= torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , snake_case__ ) return back.reshape(snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' # reshape z -> (batch, height, width, channel) and flatten lowercase__ : List[str]= z.permute(0 , 2 , 3 , 1 ).contiguous() lowercase__ : Any= z.view(-1 , self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z lowercase__ : Optional[Any]= torch.argmin(torch.cdist(snake_case__ , self.embedding.weight ) , dim=1 ) lowercase__ : Optional[Any]= self.embedding(snake_case__ 
).view(z.shape ) lowercase__ : List[Any]= None lowercase__ : str= None # compute loss for embedding if not self.legacy: lowercase__ : int= self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: lowercase__ : List[Any]= torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients lowercase__ : List[str]= z + (z_q - z).detach() # reshape back to match original input shape lowercase__ : str= z_q.permute(0 , 3 , 1 , 2 ).contiguous() if self.remap is not None: lowercase__ : Optional[int]= min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis lowercase__ : Union[str, Any]= self.remap_to_used(snake_case__ ) lowercase__ : Optional[Any]= min_encoding_indices.reshape(-1 , 1 ) # flatten if self.sane_index_shape: lowercase__ : Optional[int]= min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ): '''simple docstring''' # shape specifying (batch, height, width, channel) if self.remap is not None: lowercase__ : List[str]= indices.reshape(shape[0] , -1 ) # add batch axis lowercase__ : Any= self.unmap_to_all(snake_case__ ) lowercase__ : Tuple= indices.reshape(-1 ) # flatten again # get quantized latent vectors lowercase__ : str= self.embedding(snake_case__ ) if shape is not None: lowercase__ : Optional[Any]= z_q.view(snake_case__ ) # reshape back to match original input shape lowercase__ : Union[str, Any]= z_q.permute(0 , 3 , 1 , 2 ).contiguous() return z_q class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__( self , snake_case__ , snake_case__=False ): '''simple docstring''' lowercase__ : str= parameters lowercase__, lowercase__ : int= torch.chunk(snake_case__ , 2 , dim=1 ) lowercase__ : int= torch.clamp(self.logvar , -30.0 , 20.0 ) lowercase__ : str= deterministic lowercase__ : Optional[Any]= torch.exp(0.5 * 
self.logvar ) lowercase__ : Any= torch.exp(self.logvar ) if self.deterministic: lowercase__ : List[Any]= torch.zeros_like( self.mean , device=self.parameters.device , dtype=self.parameters.dtype ) def UpperCAmelCase_ ( self , snake_case__ = None ): '''simple docstring''' # make sure sample is on the same device as the parameters and has same dtype lowercase__ : str= randn_tensor( self.mean.shape , generator=snake_case__ , device=self.parameters.device , dtype=self.parameters.dtype ) lowercase__ : Union[str, Any]= self.mean + self.std * sample return x def UpperCAmelCase_ ( self , snake_case__=None ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__=[1, 2, 3] ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) lowercase__ : Tuple= np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' return self.mean
85
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = 42 __lowerCamelCase = 42 __lowerCamelCase = None class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = 2 @register_to_config def __init__( self , snake_case__ = 0.02 , snake_case__ = 100 , snake_case__ = 1.0_07 , snake_case__ = 80 , snake_case__ = 0.05 , snake_case__ = 50 , ): '''simple docstring''' # standard deviation of the initial noise distribution lowercase__ : int= sigma_max # setable values lowercase__ : int= None lowercase__ : np.IntTensor= None lowercase__ : torch.FloatTensor= None # sigma(t_i) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' return sample def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' lowercase__ : List[Any]= num_inference_steps lowercase__ : Any= np.arange(0 , self.num_inference_steps )[::-1].copy() lowercase__ : Tuple= torch.from_numpy(snake_case__ ).to(snake_case__ ) lowercase__ : Union[str, Any]= [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] lowercase__ : int= torch.tensor(snake_case__ , dtype=torch.floataa , device=snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = None ): '''simple docstring''' if self.config.s_min <= sigma <= self.config.s_max: lowercase__ : Optional[Any]= min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 ) else: lowercase__ : str= 0 # sample eps ~ N(0, S_noise^2 * I) lowercase__ : List[Any]= self.config.s_noise * 
randn_tensor(sample.shape , generator=snake_case__ ).to(sample.device ) lowercase__ : str= sigma + gamma * sigma lowercase__ : Any= sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ): '''simple docstring''' lowercase__ : Union[str, Any]= sample_hat + sigma_hat * model_output lowercase__ : Optional[int]= (sample_hat - pred_original_sample) / sigma_hat lowercase__ : Optional[Any]= sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ): '''simple docstring''' lowercase__ : int= sample_prev + sigma_prev * model_output lowercase__ : Optional[int]= (sample_prev - pred_original_sample) / sigma_prev lowercase__ : Optional[Any]= sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' raise NotImplementedError()
85
1
"""simple docstring""" from __future__ import annotations import unittest import numpy as np from transformers import OPTConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel def lowercase__(A , A , A=None , A=None ) ->Tuple: """simple docstring""" if attention_mask is None: lowercase__ : Optional[int]= tf.cast(tf.math.not_equal(A , config.pad_token_id ) , tf.inta ) return {"input_ids": input_ids, "attention_mask": attention_mask} @require_tf class __UpperCAmelCase: """simple docstring""" __lowerCamelCase = OPTConfig __lowerCamelCase = {} __lowerCamelCase = "gelu" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=False , snake_case__=99 , snake_case__=16 , snake_case__=2 , snake_case__=4 , snake_case__=4 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=20 , snake_case__=2 , snake_case__=1 , snake_case__=0 , snake_case__=16 , snake_case__=16 , ): '''simple docstring''' lowercase__ : Union[str, Any]= parent lowercase__ : int= batch_size lowercase__ : int= seq_length lowercase__ : str= is_training lowercase__ : int= use_labels lowercase__ : List[str]= vocab_size lowercase__ : Dict= hidden_size lowercase__ : int= num_hidden_layers lowercase__ : str= num_attention_heads lowercase__ : Optional[Any]= intermediate_size lowercase__ : List[str]= hidden_act lowercase__ : Dict= hidden_dropout_prob lowercase__ : str= attention_probs_dropout_prob lowercase__ : Optional[Any]= max_position_embeddings lowercase__ : Optional[Any]= eos_token_id lowercase__ : Any= pad_token_id lowercase__ : List[str]= bos_token_id lowercase__ : str= embed_dim lowercase__ : Union[str, 
Any]= word_embed_proj_dim lowercase__ : int= False def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) lowercase__ : Union[str, Any]= tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) lowercase__ : List[str]= tf.concat([input_ids, eos_tensor] , axis=1 ) lowercase__ : str= self.config_cls( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=snake_case__ , **self.config_updates , ) lowercase__ : Optional[Any]= prepare_opt_inputs_dict(snake_case__ , snake_case__ ) return config, inputs_dict def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ): '''simple docstring''' lowercase__ : Union[str, Any]= TFOPTModel(config=snake_case__ ) lowercase__ : Any= inputs_dict["input_ids"] lowercase__ : Optional[int]= input_ids[:1, :] lowercase__ : Dict= inputs_dict["attention_mask"][:1, :] lowercase__ : int= 1 # first forward pass lowercase__ : int= model(snake_case__ , attention_mask=snake_case__ , use_cache=snake_case__ ) lowercase__, lowercase__ : str= outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowercase__ : int= ids_tensor((self.batch_size, 3) , config.vocab_size ) lowercase__ : Optional[int]= tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and lowercase__ : List[Any]= tf.concat([input_ids, next_tokens] , axis=-1 ) lowercase__ : List[Any]= tf.concat([attention_mask, next_attn_mask] , axis=-1 ) lowercase__ : List[str]= 
model(snake_case__ , attention_mask=snake_case__ )[0] lowercase__ : Union[str, Any]= model(snake_case__ , attention_mask=snake_case__ , past_key_values=snake_case__ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice lowercase__ : List[Any]= int(ids_tensor((1,) , output_from_past.shape[-1] ) ) lowercase__ : Any= output_from_no_past[:, -3:, random_slice_idx] lowercase__ : Any= output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(snake_case__ , snake_case__ , rtol=1e-3 ) @require_tf class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" __lowerCamelCase = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else () __lowerCamelCase = (TFOPTForCausalLM,) if is_tf_available() else () __lowerCamelCase = ( {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {} ) __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = 10 def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= TFOPTModelTester(self ) lowercase__ : Optional[Any]= ConfigTester(self , config_class=snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__, lowercase__ : int= self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(snake_case__ , snake_case__ ): if hasattr(snake_case__ , "weight" ): return embedding_layer.weight else: # Here we build the word embeddings weights if not exists. # And then we retry to get the attribute once built. 
model.build() if hasattr(snake_case__ , "weight" ): return embedding_layer.weight else: return None for model_class in self.all_model_classes: for size in [config.vocab_size - 10, config.vocab_size + 10]: # build the embeddings lowercase__ : List[Any]= model_class(config=snake_case__ ) lowercase__ : Optional[int]= _get_word_embedding_weight(snake_case__ , model.get_input_embeddings() ) lowercase__ : Union[str, Any]= _get_word_embedding_weight(snake_case__ , model.get_output_embeddings() ) # reshape the embeddings model.resize_token_embeddings(snake_case__ ) lowercase__ : List[str]= _get_word_embedding_weight(snake_case__ , model.get_input_embeddings() ) lowercase__ : List[Any]= _get_word_embedding_weight(snake_case__ , model.get_output_embeddings() ) # check that the resized embeddings size matches the desired size. lowercase__ : str= size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0] , snake_case__ ) # check that weights remain the same after resizing lowercase__ : Any= True for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: lowercase__ : Dict= False self.assertTrue(snake_case__ ) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0] , snake_case__ ) lowercase__ : List[str]= True for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: lowercase__ : str= False self.assertTrue(snake_case__ ) def lowercase__(A ) ->int: """simple docstring""" return tf.constant(A , dtype=tf.intaa ) @require_tf class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" __lowerCamelCase = 99 def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : str= tf.ones((4, 1) , dtype=tf.intaa ) * 2 lowercase__ : Dict= tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 ) 
lowercase__ : str= input_ids.shape[0] lowercase__ : Optional[int]= OPTConfig( vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size @require_sentencepiece @require_tf class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= TFOPTModel.from_pretrained("facebook/opt-350m" ) lowercase__ : str= _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ) lowercase__ : Tuple= tf.not_equal(snake_case__ , model.config.pad_token_id ) with tf.GradientTape(): lowercase__ : Tuple= model(input_ids=snake_case__ , attention_mask=snake_case__ ).last_hidden_state lowercase__ : List[str]= (1, 11, 512) self.assertEqual(output.shape , snake_case__ ) lowercase__ : Dict= tf.constant( [[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] ) self.assertTrue(np.allclose(output[:, :3, :3] , snake_case__ , atol=4e-3 ) ) lowercase__ : int= tf.function(snake_case__ , jit_compile=snake_case__ ) lowercase__ : List[str]= xla_generate(snake_case__ , snake_case__ )[0] self.assertTrue(np.allclose(output[:, :3, :3] , snake_case__ , atol=4e-2 ) ) @require_tf @slow class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" def UpperCAmelCase_ ( self ): '''simple docstring''' super().setUp() lowercase__ : List[str]= "facebook/opt-350m" def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= TFOPTForCausalLM.from_pretrained(self.path_model ) lowercase__ : str= GPTaTokenizer.from_pretrained(self.path_model ) lowercase__ : Tuple= [ "Today is a beautiful day and I want to", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False 
lowercase__ : int= tokenizer(snake_case__ , return_tensors="tf" , padding=snake_case__ , add_special_tokens=snake_case__ ) lowercase__ : int= tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) lowercase__ : int= tf.constant( [ [1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70], [-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22], [0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03], [6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77], ] ) self.assertTrue(np.allclose(snake_case__ , snake_case__ , atol=1e-4 ) ) lowercase__ : int= tf.function(snake_case__ , jit_compile=snake_case__ ) lowercase__ : Optional[Any]= tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) self.assertTrue(np.allclose(snake_case__ , snake_case__ , atol=1e-4 ) ) @require_tf @slow class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" @property def UpperCAmelCase_ ( self ): '''simple docstring''' return [ "Today is a beautiful day and I want", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= "facebook/opt-125m" lowercase__ : List[str]= [ "Today is a beautiful day and I want to", "In the city of New York, the city", "Paris is the capital of France and the capital", "Computers and mobile phones have taken over the", ] lowercase__ : Union[str, Any]= [] lowercase__ : Optional[Any]= GPTaTokenizer.from_pretrained(snake_case__ ) lowercase__ : Any= TFOPTForCausalLM.from_pretrained(snake_case__ ) for prompt in self.prompts: lowercase__ : Optional[int]= tokenizer(snake_case__ , return_tensors="tf" ).input_ids lowercase__ : List[str]= model.generate(snake_case__ , max_length=10 ) lowercase__ : Union[str, Any]= 
tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ ) predicted_outputs += generated_string self.assertListEqual(snake_case__ , snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= "facebook/opt-350m" lowercase__ : Optional[Any]= GPTaTokenizer.from_pretrained(snake_case__ ) lowercase__ : Optional[int]= TFOPTForCausalLM.from_pretrained(snake_case__ ) lowercase__ : List[str]= "left" # use different length sentences to test batching lowercase__ : Dict= [ "Hello, my dog is a little", "Today, I", ] lowercase__ : Optional[int]= tokenizer(snake_case__ , return_tensors="tf" , padding=snake_case__ ) lowercase__ : int= inputs["input_ids"] lowercase__ : int= model.generate(input_ids=snake_case__ , attention_mask=inputs["attention_mask"] ) lowercase__ : Dict= tokenizer(sentences[0] , return_tensors="tf" ).input_ids lowercase__ : Optional[Any]= model.generate(input_ids=snake_case__ ) lowercase__ : Tuple= inputs_non_padded.shape[-1] - tf.math.reduce_sum( tf.cast(inputs["attention_mask"][-1] , tf.intaa ) ) lowercase__ : Optional[Any]= tokenizer(sentences[1] , return_tensors="tf" ).input_ids lowercase__ : Union[str, Any]= model.generate(input_ids=snake_case__ , max_length=model.config.max_length - num_paddings ) lowercase__ : Tuple= tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ ) lowercase__ : Optional[Any]= tokenizer.decode(output_non_padded[0] , skip_special_tokens=snake_case__ ) lowercase__ : int= tokenizer.decode(output_padded[0] , skip_special_tokens=snake_case__ ) lowercase__ : List[Any]= [ "Hello, my dog is a little bit of a dork.\nI'm a little bit", "Today, I was in the middle of a conversation with a friend about the", ] self.assertListEqual(snake_case__ , snake_case__ ) self.assertListEqual(snake_case__ , [non_padded_sentence, padded_sentence] ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= "facebook/opt-350m" lowercase__ : Optional[int]= [ "Today is a 
beautiful day and I want to", "In the city of San Francisco, the city", "Paris is the capital of France and the capital", "Computers and mobile phones have taken over the", ] lowercase__ : Dict= [] lowercase__ : Optional[int]= GPTaTokenizer.from_pretrained(snake_case__ ) lowercase__ : int= TFOPTForCausalLM.from_pretrained(snake_case__ ) for prompt in self.prompts: lowercase__ : Dict= tokenizer(snake_case__ , return_tensors="tf" ).input_ids lowercase__ : Any= model.generate(snake_case__ , max_length=10 ) lowercase__ : Tuple= tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ ) predicted_outputs += generated_string self.assertListEqual(snake_case__ , snake_case__ )
85
"""simple docstring""" from ....utils import logging a : List[str] = logging.get_logger(__name__) class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__( self , snake_case__ , snake_case__=None , snake_case__=2048 ): '''simple docstring''' lowercase__ : Dict= config.__dict__ lowercase__ : str= modal_hidden_size if num_labels: lowercase__ : List[str]= num_labels
85
1
"""simple docstring""" import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def lowercase__(A , A , A ) ->int: """simple docstring""" lowercase__ : List[Any]= 1.5 lowercase__ : str= int(factor * num_class_images ) lowercase__ : str= ClipClient( url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=A , aesthetic_weight=0.1 ) os.makedirs(f'''{class_data_dir}/images''' , exist_ok=A ) if len(list(Path(f'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images: return while True: lowercase__ : int= client.query(text=A ) if len(A ) >= factor * num_class_images or num_images > 1e4: break else: lowercase__ : str= int(factor * num_images ) lowercase__ : str= ClipClient( url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=A , aesthetic_weight=0.1 , ) lowercase__ : Optional[int]= 0 lowercase__ : Optional[Any]= 0 lowercase__ : Union[str, Any]= tqdm(desc="downloading real regularization images" , total=A ) with open(f'''{class_data_dir}/caption.txt''' , "w" ) as fa, open(f'''{class_data_dir}/urls.txt''' , "w" ) as fa, open( f'''{class_data_dir}/images.txt''' , "w" ) as fa: while total < num_class_images: lowercase__ : List[Any]= class_images[count] count += 1 try: lowercase__ : List[str]= requests.get(images["url"] ) if img.status_code == 200: lowercase__ : Any= Image.open(BytesIO(img.content ) ) with open(f'''{class_data_dir}/images/{total}.jpg''' , "wb" ) as f: f.write(img.content ) fa.write(images["caption"] + "\n" ) fa.write(images["url"] + "\n" ) fa.write(f'''{class_data_dir}/images/{total}.jpg''' + "\n" ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def lowercase__() ->str: """simple docstring""" lowercase__ : int= argparse.ArgumentParser("" , add_help=A ) parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=A , type=A 
) parser.add_argument("--class_data_dir" , help="path to save images" , required=A , type=A ) parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=A ) return parser.parse_args() if __name__ == "__main__": a : List[Any] = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
85
"""simple docstring""" import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def lowercase__(A ) ->int: """simple docstring""" lowercase__ : Optional[int]= [] embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''', f'''stage{idx}.patch_embed.proj.weight''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''', f'''stage{idx}.patch_embed.proj.bias''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''', f'''stage{idx}.patch_embed.norm.weight''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''', f'''stage{idx}.patch_embed.norm.bias''', ) ) return embed def lowercase__(A , A ) ->Any: """simple docstring""" lowercase__ : Any= [] attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''', ) ) attention_weights.append( ( 
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''', ) ) attention_weights.append( ( 
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''', ) ) attention_weights.append( ( 
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj.bias''', ) ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') ) return attention_weights def lowercase__(A ) ->List[Any]: """simple 
docstring""" lowercase__ : Dict= [] token.append((f'''cvt.encoder.stages.{idx}.cls_token''', "stage2.cls_token") ) return token def lowercase__() ->Union[str, Any]: """simple docstring""" lowercase__ : Dict= [] head.append(("layernorm.weight", "norm.weight") ) head.append(("layernorm.bias", "norm.bias") ) head.append(("classifier.weight", "head.weight") ) head.append(("classifier.bias", "head.bias") ) return head def lowercase__(A , A , A , A ) ->Optional[int]: """simple docstring""" lowercase__ : List[str]= "imagenet-1k-id2label.json" lowercase__ : List[str]= 1_000 lowercase__ : Tuple= "huggingface/label-files" lowercase__ : int= num_labels lowercase__ : int= json.load(open(cached_download(hf_hub_url(A , A , repo_type="dataset" ) ) , "r" ) ) lowercase__ : str= {int(A ): v for k, v in idalabel.items()} lowercase__ : Optional[int]= idalabel lowercase__ : Union[str, Any]= {v: k for k, v in idalabel.items()} lowercase__ : Tuple= CvtConfig(num_labels=A , idalabel=A , labelaid=A ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit("/" , 1 )[-1][4:6] == "13": lowercase__ : int= [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit("/" , 1 )[-1][4:6] == "21": lowercase__ : Union[str, Any]= [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: lowercase__ : Optional[Any]= [2, 2, 20] lowercase__ : Optional[Any]= [3, 12, 16] lowercase__ : List[str]= [192, 768, 1_024] lowercase__ : List[str]= CvtForImageClassification(A ) lowercase__ : Any= AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" ) lowercase__ : Dict= image_size lowercase__ : int= torch.load(A , map_location=torch.device("cpu" ) ) lowercase__ : Optional[Any]= OrderedDict() lowercase__ : Tuple= [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: lowercase__ : Optional[int]= list_of_state_dict + cls_token(A ) lowercase__ : List[str]= list_of_state_dict + embeddings(A ) for cnt in range(config.depth[idx] ): lowercase__ : Dict= 
list_of_state_dict + attention(A , A ) lowercase__ : Optional[Any]= list_of_state_dict + final() for gg in list_of_state_dict: print(A ) for i in range(len(A ) ): lowercase__ : str= original_weights[list_of_state_dict[i][1]] model.load_state_dict(A ) model.save_pretrained(A ) image_processor.save_pretrained(A ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": a : Optional[int] = argparse.ArgumentParser() parser.add_argument( """--cvt_model""", default="""cvt-w24""", type=str, help="""Name of the cvt model you'd like to convert.""", ) parser.add_argument( """--image_size""", default=384, type=int, help="""Input Image Size""", ) parser.add_argument( """--cvt_file_name""", default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""", type=str, help="""Input Image Size""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) a : Optional[int] = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
85
1
"""simple docstring""" from __future__ import annotations import os from collections.abc import Mapping a : Union[str, Any] = tuple[int, int] class __UpperCAmelCase: """simple docstring""" def __init__( self , snake_case__ , snake_case__ ): '''simple docstring''' lowercase__ : set[int]= vertices lowercase__ : dict[EdgeT, int]= { (min(snake_case__ ), max(snake_case__ )): weight for edge, weight in edges.items() } def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ): '''simple docstring''' self.vertices.add(edge[0] ) self.vertices.add(edge[1] ) lowercase__ : Any= weight def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Graph= Graph({min(self.vertices )} , {} ) lowercase__ : EdgeT lowercase__ : int lowercase__ : EdgeT lowercase__ : int while len(subgraph.vertices ) < len(self.vertices ): lowercase__ : str= max(self.edges.values() ) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: lowercase__ : Any= edge lowercase__ : Optional[int]= weight subgraph.add_edge(snake_case__ , snake_case__ ) return subgraph def lowercase__(A = "p107_network.txt" ) ->int: """simple docstring""" lowercase__ : str= os.path.abspath(os.path.dirname(A ) ) lowercase__ : str= os.path.join(A , A ) lowercase__ : dict[EdgeT, int]= {} lowercase__ : list[str] lowercase__ : int lowercase__ : int with open(A ) as f: lowercase__ : List[Any]= f.read().strip().split("\n" ) lowercase__ : Any= [line.split("," ) for line in data] for edgea in range(1 , len(A ) ): for edgea in range(A ): if adjaceny_matrix[edgea][edgea] != "-": lowercase__ : List[Any]= int(adjaceny_matrix[edgea][edgea] ) lowercase__ : Graph= Graph(set(range(len(A ) ) ) , A ) lowercase__ : Graph= graph.prims_algorithm() lowercase__ : int= sum(graph.edges.values() ) lowercase__ : int= sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(F"""{solution() = }""")
85
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import ScoreSdeVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = 42 __lowerCamelCase = 42 def __init__( self , snake_case__ , snake_case__ ): '''simple docstring''' super().__init__() self.register_modules(unet=snake_case__ , scheduler=snake_case__ ) @torch.no_grad() def __call__( self , snake_case__ = 1 , snake_case__ = 2000 , snake_case__ = None , snake_case__ = "pil" , snake_case__ = True , **snake_case__ , ): '''simple docstring''' lowercase__ : Optional[Any]= self.unet.config.sample_size lowercase__ : Dict= (batch_size, 3, img_size, img_size) lowercase__ : List[Any]= self.unet lowercase__ : Tuple= randn_tensor(snake_case__ , generator=snake_case__ ) * self.scheduler.init_noise_sigma lowercase__ : Tuple= sample.to(self.device ) self.scheduler.set_timesteps(snake_case__ ) self.scheduler.set_sigmas(snake_case__ ) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): lowercase__ : Optional[Any]= self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device ) # correction step for _ in range(self.scheduler.config.correct_steps ): lowercase__ : List[Any]= self.unet(snake_case__ , snake_case__ ).sample lowercase__ : List[Any]= self.scheduler.step_correct(snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample # prediction step lowercase__ : List[str]= model(snake_case__ , snake_case__ ).sample lowercase__ : Tuple= self.scheduler.step_pred(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ) lowercase__, lowercase__ : Tuple= output.prev_sample, output.prev_sample_mean lowercase__ : List[str]= sample_mean.clamp(0 , 1 ) lowercase__ : Union[str, Any]= sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowercase__ : 
str= self.numpy_to_pil(snake_case__ ) if not return_dict: return (sample,) return ImagePipelineOutput(images=snake_case__ )
85
1
"""simple docstring""" # Function to print upper half of diamond (pyramid) def lowercase__(A ) ->Tuple: """simple docstring""" for i in range(0 , A ): for _ in range(0 , n - i - 1 ): # printing spaces print(" " , end="" ) for _ in range(0 , i + 1 ): # printing stars print("* " , end="" ) print() def lowercase__(A ) ->Tuple: """simple docstring""" for i in range(A , 0 , -1 ): for _ in range(A , 0 , -1 ): # printing stars print("* " , end="" ) print() for _ in range(n - i + 1 , 0 , -1 ): # printing spaces print(" " , end="" ) def lowercase__(A ) ->Dict: """simple docstring""" if n <= 0: print(" ... .... nothing printing :(" ) return floyd(A ) # upper half reverse_floyd(A ) # lower half if __name__ == "__main__": print(r"""| /\ | |- | |- |--| |\ /| |-""") print(r"""|/ \| |- |_ |_ |__| | \/ | |_""") a : List[Any] = 1 while K: a : int = int(input("""enter the number and , and see the magic : """)) print() pretty_print(user_number) a : List[str] = int(input("""press 0 to exit... and 1 to continue...""")) print("""Good Bye...""")
85
"""simple docstring""" def lowercase__(A ) ->list[int]: """simple docstring""" lowercase__ : List[str]= len(A ) for i in range(A ): for j in range(i + 1 , A ): if numbers[j] < numbers[i]: lowercase__, lowercase__ : List[str]= numbers[j], numbers[i] return numbers if __name__ == "__main__": a : Dict = input("""Enter numbers separated by a comma:\n""").strip() a : List[str] = [int(item) for item in user_input.split(""",""")] print(exchange_sort(unsorted))
85
1
"""simple docstring""" a : List[str] = """ # Installazione di Transformers ! pip install transformers datasets # Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e # rimuovi la modalità commento al comando seguente. # ! pip install git+https://github.com/huggingface/transformers.git """ a : Dict = [{"""type""": """code""", """content""": INSTALL_CONTENT}] a : str = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
85
"""simple docstring""" import math from collections.abc import Iterator from itertools import takewhile def lowercase__(A ) ->bool: """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowercase__() ->Iterator[int]: """simple docstring""" lowercase__ : Union[str, Any]= 2 while True: if is_prime(A ): yield num num += 1 def lowercase__(A = 2_000_000 ) ->int: """simple docstring""" return sum(takewhile(lambda A : x < n , prime_generator() ) ) if __name__ == "__main__": print(F"""{solution() = }""")
85
1
"""simple docstring""" from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging a : Dict = logging.get_logger(__name__) class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = ["input_features", "attention_mask"] def __init__( self , snake_case__=80 , snake_case__=16000 , snake_case__=0.0 , snake_case__=10 , snake_case__=25 , snake_case__="hamming_window" , snake_case__=3_27_68.0 , snake_case__=0.97 , snake_case__=1.0 , snake_case__=True , snake_case__=True , snake_case__=False , **snake_case__ , ): '''simple docstring''' super().__init__(feature_size=snake_case__ , sampling_rate=snake_case__ , padding_value=snake_case__ , **snake_case__ ) lowercase__ : int= feature_size lowercase__ : Any= sampling_rate lowercase__ : List[str]= padding_value lowercase__ : str= hop_length lowercase__ : Optional[int]= win_length lowercase__ : Any= frame_signal_scale lowercase__ : List[str]= preemphasis_coeff lowercase__ : Optional[Any]= mel_floor lowercase__ : Optional[int]= normalize_means lowercase__ : List[str]= normalize_vars lowercase__ : Dict= win_function lowercase__ : List[str]= return_attention_mask lowercase__ : Any= win_length * sampling_rate // 1000 lowercase__ : List[Any]= hop_length * sampling_rate // 1000 lowercase__ : Union[str, Any]= optimal_fft_length(self.sample_size ) lowercase__ : Any= (self.n_fft // 2) + 1 def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' if self.win_function == "hamming_window": lowercase__ : List[str]= window_function(window_length=self.sample_size , name=self.win_function , periodic=snake_case__ ) else: lowercase__ : Optional[Any]= window_function(window_length=self.sample_size , 
name=self.win_function ) lowercase__ : Any= mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , ) lowercase__ : Dict= spectrogram( one_waveform * self.frame_signal_scale , window=snake_case__ , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=snake_case__ , preemphasis=self.preemphasis_coeff , mel_filters=snake_case__ , mel_floor=self.mel_floor , log_mel="log" , ) return msfc_features.T def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' # make sure we normalize float32 arrays if self.normalize_means: lowercase__ : Optional[int]= x[:input_length].mean(axis=0 ) lowercase__ : int= np.subtract(snake_case__ , snake_case__ ) if self.normalize_vars: lowercase__ : List[str]= x[:input_length].std(axis=0 ) lowercase__ : Union[str, Any]= np.divide(snake_case__ , snake_case__ ) if input_length < x.shape[0]: lowercase__ : Optional[int]= padding_value # make sure array is in float32 lowercase__ : Union[str, Any]= x.astype(np.floataa ) return x def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' lowercase__ : Optional[int]= attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [self._normalize_one(snake_case__ , snake_case__ , self.padding_value ) for x, n in zip(snake_case__ , snake_case__ )] def __call__( self , snake_case__ , snake_case__ = False , snake_case__ = None , snake_case__ = False , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , **snake_case__ , ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. 
Please make sure that the provided `raw_speech` input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( "It is strongly recommended to pass the ``sampling_rate`` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) lowercase__ : Optional[int]= isinstance(snake_case__ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) lowercase__ : int= is_batched_numpy or ( isinstance(snake_case__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowercase__ : Union[str, Any]= [np.asarray(snake_case__ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(snake_case__ , np.ndarray ): lowercase__ : Optional[Any]= np.asarray(snake_case__ , dtype=np.floataa ) elif isinstance(snake_case__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowercase__ : Union[str, Any]= raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowercase__ : List[str]= [raw_speech] # extract fbank features lowercase__ : List[Any]= [self._extract_mfsc_features(snake_case__ ) for one_waveform in raw_speech] # convert into correct format for padding lowercase__ : Optional[int]= BatchFeature({"input_features": features} ) lowercase__ : Dict= self.pad( snake_case__ , padding=snake_case__ , max_length=snake_case__ , truncation=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , **snake_case__ , ) # make sure list is in array format lowercase__ : Dict= padded_inputs.get("input_features" ) if isinstance(input_features[0] , snake_case__ ): lowercase__ : Any= [np.asarray(snake_case__ , dtype=np.floataa ) for feature in input_features] lowercase__ : int= padded_inputs.get("attention_mask" ) if attention_mask is not None: lowercase__ : List[Any]= 
[np.asarray(snake_case__ , dtype=np.intaa ) for array in attention_mask] if self.normalize_means or self.normalize_vars: lowercase__ : List[str]= ( np.array(snake_case__ , dtype=np.intaa ) if self._get_padding_strategies(snake_case__ , max_length=snake_case__ ) is not PaddingStrategy.DO_NOT_PAD and padding else None ) lowercase__ : int= self.normalize( padded_inputs["input_features"] , attention_mask=snake_case__ ) if return_tensors is not None: lowercase__ : Tuple= padded_inputs.convert_to_tensors(snake_case__ ) return padded_inputs
85
"""simple docstring""" def lowercase__(A ) ->bool: """simple docstring""" lowercase__ : Tuple= (1 + 24 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def lowercase__(A = 5_000 ) ->int: """simple docstring""" lowercase__ : str= [(i * (3 * i - 1)) // 2 for i in range(1 , A )] for i, pentagonal_i in enumerate(A ): for j in range(A , len(A ) ): lowercase__ : List[Any]= pentagonal_nums[j] lowercase__ : int= pentagonal_i + pentagonal_j lowercase__ : Optional[int]= pentagonal_j - pentagonal_i if is_pentagonal(A ) and is_pentagonal(A ): return b return -1 if __name__ == "__main__": print(F"""{solution() = }""")
85
1
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig a : List[str] = logging.get_logger(__name__) a : List[str] = { """Intel/dpt-large""": """https://huggingface.co/Intel/dpt-large/resolve/main/config.json""", # See all DPT models at https://huggingface.co/models?filter=dpt } class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = "dpt" def __init__( self , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=384 , snake_case__=16 , snake_case__=3 , snake_case__=False , snake_case__=True , snake_case__=[2, 5, 8, 11] , snake_case__="project" , snake_case__=[4, 2, 1, 0.5] , snake_case__=[96, 192, 384, 768] , snake_case__=256 , snake_case__=-1 , snake_case__=False , snake_case__=True , snake_case__=0.4 , snake_case__=255 , snake_case__=0.1 , snake_case__=[1, 1024, 24, 24] , snake_case__=[0, 1] , snake_case__=None , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) lowercase__ : Union[str, Any]= hidden_size lowercase__ : List[str]= is_hybrid if self.is_hybrid: if backbone_config is None: logger.info("Initializing the config with a `BiT` backbone." ) lowercase__ : Optional[Any]= { "global_padding": "same", "layer_type": "bottleneck", "depths": [3, 4, 9], "out_features": ["stage1", "stage2", "stage3"], "embedding_dynamic_padding": True, } lowercase__ : str= BitConfig(**snake_case__ ) elif isinstance(snake_case__ , snake_case__ ): logger.info("Initializing the config with a `BiT` backbone." 
) lowercase__ : Optional[Any]= BitConfig(**snake_case__ ) elif isinstance(snake_case__ , snake_case__ ): lowercase__ : str= backbone_config else: raise ValueError( F'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' ) lowercase__ : Any= backbone_featmap_shape lowercase__ : Optional[Any]= neck_ignore_stages if readout_type != "project": raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode." ) else: lowercase__ : List[Any]= None lowercase__ : int= None lowercase__ : List[Any]= [] lowercase__ : Tuple= num_hidden_layers lowercase__ : Union[str, Any]= num_attention_heads lowercase__ : Union[str, Any]= intermediate_size lowercase__ : Union[str, Any]= hidden_act lowercase__ : int= hidden_dropout_prob lowercase__ : str= attention_probs_dropout_prob lowercase__ : List[str]= initializer_range lowercase__ : Union[str, Any]= layer_norm_eps lowercase__ : List[Any]= image_size lowercase__ : List[str]= patch_size lowercase__ : str= num_channels lowercase__ : Dict= qkv_bias lowercase__ : List[Any]= backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']" ) lowercase__ : List[Any]= readout_type lowercase__ : List[str]= reassemble_factors lowercase__ : Optional[Any]= neck_hidden_sizes lowercase__ : Union[str, Any]= fusion_hidden_size lowercase__ : Any= head_in_index lowercase__ : int= use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) lowercase__ : Optional[int]= use_auxiliary_head lowercase__ : Tuple= auxiliary_loss_weight lowercase__ : Dict= semantic_loss_ignore_index lowercase__ : Any= semantic_classifier_dropout def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: lowercase__ : Union[str, Any]= self.backbone_config.to_dict() lowercase__ : int= self.__class__.model_type return output
85
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging a : List[str] = logging.get_logger(__name__) a : Union[str, Any] = { """google/pix2struct-textcaps-base""": ( """https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json""" ), } class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = "pix2struct_text_model" __lowerCamelCase = ["past_key_values"] __lowerCamelCase = { "hidden_size": "hidden_size", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self , snake_case__=50244 , snake_case__=768 , snake_case__=64 , snake_case__=2048 , snake_case__=12 , snake_case__=12 , snake_case__=32 , snake_case__=128 , snake_case__=0.1 , snake_case__=1e-6 , snake_case__=1.0 , snake_case__="gelu_new" , snake_case__=0 , snake_case__=False , snake_case__=0 , snake_case__=1 , snake_case__=False , snake_case__=True , **snake_case__ , ): '''simple docstring''' lowercase__ : int= vocab_size lowercase__ : Optional[Any]= hidden_size lowercase__ : Tuple= d_kv lowercase__ : Optional[int]= d_ff lowercase__ : Any= num_layers lowercase__ : Dict= num_heads lowercase__ : List[Any]= relative_attention_num_buckets lowercase__ : Optional[Any]= relative_attention_max_distance lowercase__ : Dict= dropout_rate lowercase__ : Tuple= layer_norm_epsilon lowercase__ : str= initializer_factor lowercase__ : Any= use_cache lowercase__ : Optional[int]= eos_token_id lowercase__ : str= decoder_start_token_id # for backwards compatibility lowercase__ : Optional[Any]= dense_act_fn super().__init__( pad_token_id=snake_case__ , eos_token_id=snake_case__ , decoder_start_token_id=snake_case__ , tie_word_embeddings=snake_case__ , is_decoder=snake_case__ , **snake_case__ , ) @classmethod def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ): '''simple docstring''' cls._set_token_in_kwargs(snake_case__ ) lowercase__, 
lowercase__ : str= cls.get_config_dict(snake_case__ , **snake_case__ ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get("model_type" ) == "pix2struct": lowercase__ : str= config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(snake_case__ , **snake_case__ ) class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = "pix2struct_vision_model" def __init__( self , snake_case__=768 , snake_case__=768 , snake_case__=2048 , snake_case__=64 , snake_case__=12 , snake_case__=12 , snake_case__="gelu_new" , snake_case__=1e-6 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=1e-10 , snake_case__=1.0 , snake_case__=4096 , snake_case__=32 , snake_case__=128 , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) lowercase__ : Tuple= hidden_size lowercase__ : Tuple= patch_embed_hidden_size lowercase__ : Optional[Any]= d_ff lowercase__ : Dict= dropout_rate lowercase__ : Any= num_hidden_layers lowercase__ : Optional[int]= num_attention_heads lowercase__ : Dict= initializer_range lowercase__ : Tuple= initializer_factor lowercase__ : Tuple= attention_dropout lowercase__ : Optional[Any]= layer_norm_eps lowercase__ : List[Any]= dense_act_fn lowercase__ : str= seq_len lowercase__ : List[str]= relative_attention_num_buckets lowercase__ : Union[str, Any]= relative_attention_max_distance lowercase__ : Dict= d_kv @classmethod def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ): '''simple docstring''' cls._set_token_in_kwargs(snake_case__ ) lowercase__, lowercase__ : int= cls.get_config_dict(snake_case__ , **snake_case__ ) # get the vision config dict if we are loading 
from Pix2StructConfig if config_dict.get("model_type" ) == "pix2struct": lowercase__ : Union[str, Any]= config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(snake_case__ , **snake_case__ ) class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = "pix2struct" __lowerCamelCase = True def __init__( self , snake_case__=None , snake_case__=None , snake_case__=1.0 , snake_case__=0.02 , snake_case__=False , snake_case__=False , snake_case__=True , **snake_case__ , ): '''simple docstring''' super().__init__(tie_word_embeddings=snake_case__ , is_encoder_decoder=snake_case__ , **snake_case__ ) if text_config is None: lowercase__ : List[Any]= {} logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." ) if vision_config is None: lowercase__ : str= {} logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." 
) lowercase__ : str= PixaStructTextConfig(**snake_case__ ) lowercase__ : Dict= PixaStructVisionConfig(**snake_case__ ) lowercase__ : int= self.text_config.decoder_start_token_id lowercase__ : List[Any]= self.text_config.pad_token_id lowercase__ : Any= self.text_config.eos_token_id lowercase__ : Any= initializer_factor lowercase__ : int= initializer_range lowercase__ : List[str]= self.initializer_range lowercase__ : List[str]= self.initializer_range lowercase__ : Dict= is_vqa @classmethod def UpperCAmelCase_ ( cls , snake_case__ , snake_case__ , **snake_case__ ): '''simple docstring''' return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= copy.deepcopy(self.__dict__ ) lowercase__ : str= self.text_config.to_dict() lowercase__ : str= self.vision_config.to_dict() lowercase__ : List[str]= self.__class__.model_type return output
85
1
"""simple docstring""" from .testing import ( are_the_same_tensors, execute_subprocess_async, require_bnb, require_cpu, require_cuda, require_huggingface_suite, require_mps, require_multi_gpu, require_multi_xpu, require_safetensors, require_single_gpu, require_single_xpu, require_torch_min_version, require_tpu, require_xpu, skip, slow, ) from .training import RegressionDataset, RegressionModel, RegressionModelaXPU from .scripts import test_script, test_sync, test_ops # isort: skip
85
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : str= TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" ) lowercase__ : str= AutoTokenizer.from_pretrained("google/mt5-small" ) lowercase__ : Tuple= tokenizer("Hello there" , return_tensors="tf" ).input_ids lowercase__ : Optional[Any]= tokenizer("Hi I am" , return_tensors="tf" ).input_ids lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ ).loss lowercase__ : int= -tf.math.reduce_mean(snake_case__ ).numpy() lowercase__ : int= -21.22_81_68 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
85
1
"""simple docstring""" import unittest from diffusers import FlaxAutoencoderKL from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax from .test_modeling_common_flax import FlaxModelTesterMixin if is_flax_available(): import jax @require_flax class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" __lowerCamelCase = FlaxAutoencoderKL @property def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= 4 lowercase__ : str= 3 lowercase__ : Dict= (32, 32) lowercase__ : Dict= jax.random.PRNGKey(0 ) lowercase__ : Union[str, Any]= jax.random.uniform(snake_case__ , ((batch_size, num_channels) + sizes) ) return {"sample": image, "prng_key": prng_key} def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } lowercase__ : Tuple= self.dummy_input return init_dict, inputs_dict
85
"""simple docstring""" from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = ["image_processor", "tokenizer"] __lowerCamelCase = "BridgeTowerImageProcessor" __lowerCamelCase = ("RobertaTokenizer", "RobertaTokenizerFast") def __init__( self , snake_case__ , snake_case__ ): '''simple docstring''' super().__init__(snake_case__ , snake_case__ ) def __call__( self , snake_case__ , snake_case__ = None , snake_case__ = True , snake_case__ = False , snake_case__ = None , snake_case__ = None , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = True , snake_case__ = None , **snake_case__ , ): '''simple docstring''' lowercase__ : Optional[int]= self.tokenizer( text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_token_type_ids=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , ) # add pixel_values + pixel_mask lowercase__ : Optional[int]= self.image_processor( snake_case__ , return_tensors=snake_case__ , do_normalize=snake_case__ , do_center_crop=snake_case__ , **snake_case__ ) encoding.update(snake_case__ ) return encoding def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ ) def UpperCAmelCase_ ( self , 
*snake_case__ , **snake_case__ ): '''simple docstring''' return self.tokenizer.decode(*snake_case__ , **snake_case__ ) @property def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.tokenizer.model_input_names lowercase__ : List[Any]= self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
85
1
"""simple docstring""" from __future__ import annotations from random import random class __UpperCAmelCase: """simple docstring""" def __init__( self , snake_case__ = None ): '''simple docstring''' lowercase__ : Union[str, Any]= value lowercase__ : Tuple= random() lowercase__ : Node | None= None lowercase__ : Node | None= None def __repr__( self ): '''simple docstring''' from pprint import pformat if self.left is None and self.right is None: return F'''\'{self.value}: {self.prior:.5}\'''' else: return pformat( {F'''{self.value}: {self.prior:.5}''': (self.left, self.right)} , indent=1 ) def __str__( self ): '''simple docstring''' lowercase__ : Optional[Any]= str(self.value ) + " " lowercase__ : List[Any]= str(self.left or "" ) lowercase__ : str= str(self.right or "" ) return value + left + right def lowercase__(A , A ) ->tuple[Node | None, Node | None]: """simple docstring""" if root is None: # None tree is split into 2 Nones return None, None elif root.value is None: return None, None else: if value < root.value: lowercase__, lowercase__ : Union[str, Any]= split(root.left , A ) return left, root else: lowercase__, lowercase__ : Dict= split(root.right , A ) return root, right def lowercase__(A , A ) ->Node | None: """simple docstring""" if (not left) or (not right): # If one node is None, return the other return left or right elif left.prior < right.prior: lowercase__ : Any= merge(left.right , A ) return left else: lowercase__ : Optional[int]= merge(A , right.left ) return right def lowercase__(A , A ) ->Node | None: """simple docstring""" lowercase__ : int= Node(A ) lowercase__, lowercase__ : Dict= split(A , A ) return merge(merge(A , A ) , A ) def lowercase__(A , A ) ->Node | None: """simple docstring""" lowercase__, lowercase__ : Tuple= split(A , value - 1 ) lowercase__, lowercase__ : Dict= split(A , A ) return merge(A , A ) def lowercase__(A ) ->None: """simple docstring""" if not root: # None return else: inorder(root.left ) print(root.value , end="," ) 
inorder(root.right ) def lowercase__(A , A ) ->Node | None: """simple docstring""" for arg in args.split(): if arg[0] == "+": lowercase__ : Dict= insert(A , int(arg[1:] ) ) elif arg[0] == "-": lowercase__ : Any= erase(A , int(arg[1:] ) ) else: print("Unknown command" ) return root def lowercase__() ->None: """simple docstring""" lowercase__ : int= None print( "enter numbers to create a tree, + value to add value into treap, " "- value to erase all nodes with value. 'q' to quit. " ) lowercase__ : Optional[int]= input() while args != "q": lowercase__ : Tuple= interact_treap(A , A ) print(A ) lowercase__ : str= input() print("good by!" ) if __name__ == "__main__": import doctest doctest.testmod() main()
85
"""simple docstring""" import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= tempfile.mkdtemp() lowercase__ : Optional[Any]= 8 # DPR tok lowercase__ : Tuple= [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] lowercase__ : Any= os.path.join(self.tmpdirname , "dpr_tokenizer" ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) lowercase__ : Any= os.path.join(snake_case__ , DPR_VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) # BART tok lowercase__ : List[Any]= [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] lowercase__ : Tuple= 
dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) lowercase__ : Any= ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] lowercase__ : Tuple= {"unk_token": "<unk>"} lowercase__ : int= os.path.join(self.tmpdirname , "bart_tokenizer" ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) lowercase__ : List[str]= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["vocab_file"] ) lowercase__ : str= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(snake_case__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(snake_case__ ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= self.get_dummy_dataset() lowercase__ : Optional[Any]= RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset: lowercase__ : Tuple= dataset 
lowercase__ : Optional[int]= RagRetriever( snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__ : Dict= self.get_dummy_dataset() lowercase__ : Tuple= RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , ) if from_disk: lowercase__ : Tuple= os.path.join(self.tmpdirname , "dataset" ) lowercase__ : Optional[Any]= os.path.join(self.tmpdirname , "index.faiss" ) dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) ) dataset.drop_index("embeddings" ) dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) ) del dataset lowercase__ : List[Any]= RagRetriever( snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: lowercase__ : Optional[int]= RagRetriever( snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , snake_case__ ) , ) return retriever def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT ) lowercase__ : Optional[int]= os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" ) dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" ) pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) ) lowercase__ : int= os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" ) lowercase__ : str= {sample["id"]: 
[sample["text"], sample["title"]] for sample in dataset} pickle.dump(snake_case__ , open(snake_case__ , "wb" ) ) lowercase__ : List[Any]= RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , ) lowercase__ : Optional[Any]= RagRetriever( snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= 1 lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever() lowercase__ : Union[str, Any]= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , n_docs=snake_case__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset: lowercase__ : Tuple= self.get_dummy_dataset() retriever.save_pretrained(snake_case__ ) lowercase__ : int= RagRetriever.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) lowercase__ : Any= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , 
dtype=np.floataa ) lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 ) self.assertTrue(out is not None ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[Any]= 1 lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) lowercase__ : Union[str, Any]= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__, lowercase__, lowercase__ : Any= retriever.retrieve(snake_case__ , n_docs=snake_case__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(snake_case__ ) lowercase__ : int= RagRetriever.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) lowercase__ : Tuple= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : str= retriever.retrieve(snake_case__ , n_docs=1 ) self.assertTrue(out is not None ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= 1 lowercase__ : str= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) lowercase__ : List[str]= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , 
n_docs=snake_case__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(snake_case__ ) lowercase__ : Optional[Any]= RagRetriever.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) lowercase__ : int= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : Union[str, Any]= retriever.retrieve(snake_case__ , n_docs=1 ) self.assertTrue(out is not None ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= 1 lowercase__ : int= self.get_dummy_legacy_index_retriever() lowercase__ : Optional[Any]= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__, lowercase__, lowercase__ : Optional[Any]= retriever.retrieve(snake_case__ , n_docs=snake_case__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] ) self.assertEqual(len(doc_dicts[0]["text"] ) , snake_case__ ) self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , 
[[1], [0]] ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(snake_case__ ) lowercase__ : List[Any]= RagRetriever.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) lowercase__ : str= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def UpperCAmelCase_ ( self ): '''simple docstring''' import torch lowercase__ : str= 1 lowercase__ : Union[str, Any]= self.get_dummy_canonical_hf_index_retriever() lowercase__ : str= [[5, 7], [10, 11]] lowercase__ : List[str]= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : Dict= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ ) lowercase__, lowercase__, lowercase__ : Optional[int]= ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(snake_case__ , snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) self.assertIsInstance(snake_case__ , np.ndarray ) lowercase__ : Any= retriever( snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ , return_tensors="pt" , ) lowercase__, lowercase__, lowercase__, lowercase__ : Tuple= ( # noqa: F841 out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], out["doc_ids"], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(snake_case__ , torch.Tensor ) self.assertIsInstance(snake_case__ , torch.Tensor 
) self.assertIsInstance(snake_case__ , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= self.get_dpr_ctx_encoder_tokenizer() lowercase__ : Dict= 1 lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) retriever.set_ctx_encoder_tokenizer(snake_case__ ) lowercase__ : List[str]= [[5, 7], [10, 11]] lowercase__ : Any= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : List[Any]= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ ) self.assertEqual( len(snake_case__ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , snake_case__ ) # check for doc token related keys in dictionary.
85
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a : Optional[Any] = { """configuration_nllb_moe""": [ """NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NllbMoeConfig""", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : int = [ """NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""", """NllbMoeForConditionalGeneration""", """NllbMoeModel""", """NllbMoePreTrainedModel""", """NllbMoeTop2Router""", """NllbMoeSparseMLP""", ] if TYPE_CHECKING: from .configuration_nllb_moe import ( NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, NllbMoeConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nllb_moe import ( NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST, NllbMoeForConditionalGeneration, NllbMoeModel, NllbMoePreTrainedModel, NllbMoeSparseMLP, NllbMoeTopaRouter, ) else: import sys a : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
85
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = ["image_processor", "tokenizer"] __lowerCamelCase = "AutoImageProcessor" __lowerCamelCase = "AutoTokenizer" def __init__( self , snake_case__ , snake_case__ ): '''simple docstring''' super().__init__(snake_case__ , snake_case__ ) lowercase__ : List[Any]= self.image_processor def __call__( self , snake_case__=None , snake_case__=None , snake_case__=None , **snake_case__ ): '''simple docstring''' if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none." ) if text is not None: lowercase__ : Tuple= self.tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ ) if images is not None: lowercase__ : str= self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ ) if text is not None and images is not None: lowercase__ : Any= image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ ) def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ ) def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' return self.tokenizer.decode(*snake_case__ , **snake_case__ ) @property def UpperCAmelCase_ ( self ): '''simple docstring''' return ["input_ids", "attention_mask", "pixel_values"]
85
1
"""simple docstring""" import math import qiskit def lowercase__(A = 1 , A = 1 , A = 1 ) ->qiskit.result.counts.Counts: """simple docstring""" if ( isinstance(A , A ) or isinstance(A , A ) or isinstance(A , A ) ): raise TypeError("inputs must be integers." ) if (input_a < 0) or (input_a < 0) or (carry_in < 0): raise ValueError("inputs must be positive." ) if ( (math.floor(A ) != input_a) or (math.floor(A ) != input_a) or (math.floor(A ) != carry_in) ): raise ValueError("inputs must be exact integers." ) if (input_a > 2) or (input_a > 2) or (carry_in > 2): raise ValueError("inputs must be less or equal to 2." ) # build registers lowercase__ : List[str]= qiskit.QuantumRegister(4 , "qr" ) lowercase__ : Dict= qiskit.ClassicalRegister(2 , "cr" ) # list the entries lowercase__ : Optional[int]= [input_a, input_a, carry_in] lowercase__ : Any= qiskit.QuantumCircuit(A , A ) for i in range(0 , 3 ): if entry[i] == 2: quantum_circuit.h(A ) # for hadamard entries elif entry[i] == 1: quantum_circuit.x(A ) # for 1 entries elif entry[i] == 0: quantum_circuit.i(A ) # for 0 entries # build the circuit quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate quantum_circuit.cx(0 , 1 ) quantum_circuit.ccx(1 , 2 , 3 ) quantum_circuit.cx(1 , 2 ) quantum_circuit.cx(0 , 1 ) quantum_circuit.measure([2, 3] , A ) # measure the last two qbits lowercase__ : int= qiskit.Aer.get_backend("aer_simulator" ) lowercase__ : Union[str, Any]= qiskit.execute(A , A , shots=1_000 ) return job.result().get_counts(A ) if __name__ == "__main__": print(F"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
85
"""simple docstring""" a : List[Any] = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/""" def lowercase__(A ) ->bytes: """simple docstring""" if not isinstance(A , A ): lowercase__ : Union[str, Any]= f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(A ) lowercase__ : str= "".join(bin(A )[2:].zfill(8 ) for byte in data ) lowercase__ : Tuple= len(A ) % 6 != 0 if padding_needed: # The padding that will be added later lowercase__ : Union[str, Any]= b"=" * ((6 - len(A ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(A ) % 6) else: lowercase__ : str= b"" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(A ) , 6 ) ).encode() + padding ) def lowercase__(A ) ->bytes: """simple docstring""" if not isinstance(A , A ) and not isinstance(A , A ): lowercase__ : str= ( "argument should be a bytes-like object or ASCII string, " f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(A ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(A , A ): try: lowercase__ : Optional[Any]= encoded_data.decode("utf-8" ) except UnicodeDecodeError: raise ValueError("base64 encoded data should only contain ASCII characters" ) lowercase__ : List[Any]= encoded_data.count("=" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." 
# Check the padding assert len(A ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one lowercase__ : str= encoded_data[:-padding] lowercase__ : Tuple= "".join( bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: lowercase__ : Tuple= "".join( bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data ) lowercase__ : Any= [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(A ) , 8 ) ] return bytes(A ) if __name__ == "__main__": import doctest doctest.testmod()
85
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available a : Optional[int] = { """configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : List[Any] = ["""BloomTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : List[str] = [ """BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""", """BloomForCausalLM""", """BloomModel""", """BloomPreTrainedModel""", """BloomForSequenceClassification""", """BloomForTokenClassification""", """BloomForQuestionAnswering""", ] if TYPE_CHECKING: from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bloom_fast import BloomTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bloom import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomPreTrainedModel, ) else: import sys a : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
85
"""simple docstring""" from __future__ import annotations def lowercase__(A ) ->list[int]: # This function is recursive """simple docstring""" lowercase__ : int= len(A ) # If the array contains only one element, we return it (it's the stop condition of # recursion) if array_length <= 1: return array # Else lowercase__ : str= array[0] lowercase__ : Optional[Any]= False lowercase__ : Any= 1 lowercase__ : list[int]= [] while not is_found and i < array_length: if array[i] < pivot: lowercase__ : Union[str, Any]= True lowercase__ : List[str]= [element for element in array[i:] if element >= array[i]] lowercase__ : Union[str, Any]= longest_subsequence(A ) if len(A ) > len(A ): lowercase__ : List[str]= temp_array else: i += 1 lowercase__ : List[str]= [element for element in array[1:] if element >= pivot] lowercase__ : List[str]= [pivot, *longest_subsequence(A )] if len(A ) > len(A ): return temp_array else: return longest_subseq if __name__ == "__main__": import doctest doctest.testmod()
85
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) a : str = { """configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""], """processing_layoutlmv2""": ["""LayoutLMv2Processor"""], """tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Union[str, Any] = ["""LayoutLMv2TokenizerFast"""] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Union[str, Any] = ["""LayoutLMv2FeatureExtractor"""] a : str = ["""LayoutLMv2ImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Tuple = [ """LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""", """LayoutLMv2ForQuestionAnswering""", """LayoutLMv2ForSequenceClassification""", """LayoutLMv2ForTokenClassification""", """LayoutLMv2Layer""", """LayoutLMv2Model""", """LayoutLMv2PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from 
.modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys a : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
85
"""simple docstring""" import argparse from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline if __name__ == "__main__": a : int = argparse.ArgumentParser() parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--txt2img_unclip""", default="""kakaobrain/karlo-v1-alpha""", type=str, required=False, help="""The pretrained txt2img unclip.""", ) a : List[str] = parser.parse_args() a : List[str] = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip) a : Optional[Any] = CLIPImageProcessor() a : List[str] = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""") a : Tuple = UnCLIPImageVariationPipeline( decoder=txtaimg.decoder, text_encoder=txtaimg.text_encoder, tokenizer=txtaimg.tokenizer, text_proj=txtaimg.text_proj, feature_extractor=feature_extractor, image_encoder=image_encoder, super_res_first=txtaimg.super_res_first, super_res_last=txtaimg.super_res_last, decoder_scheduler=txtaimg.decoder_scheduler, super_res_scheduler=txtaimg.super_res_scheduler, ) imgaimg.save_pretrained(args.dump_path)
85
1
"""simple docstring""" # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" __lowerCamelCase = StableDiffusionControlNetImgaImgPipeline __lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} __lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} ) __lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS def UpperCAmelCase_ ( self ): '''simple docstring''' torch.manual_seed(0 ) lowercase__ : Union[str, Any]= UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) torch.manual_seed(0 ) lowercase__ : Optional[Any]= ControlNetModel( 
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) torch.manual_seed(0 ) lowercase__ : Dict= DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , ) torch.manual_seed(0 ) lowercase__ : str= AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) lowercase__ : Optional[Any]= CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) lowercase__ : str= CLIPTextModel(snake_case__ ) lowercase__ : Optional[int]= CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) lowercase__ : List[str]= { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def UpperCAmelCase_ ( self , snake_case__ , snake_case__=0 ): '''simple docstring''' if str(snake_case__ ).startswith("mps" ): lowercase__ : Optional[Any]= torch.manual_seed(snake_case__ ) else: lowercase__ : Union[str, Any]= torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) lowercase__ : Tuple= 2 lowercase__ : Optional[int]= randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=snake_case__ , device=torch.device(snake_case__ ) , ) lowercase__ : Optional[int]= floats_tensor(control_image.shape , rng=random.Random(snake_case__ ) ).to(snake_case__ ) lowercase__ : List[str]= image.cpu().permute(0 , 2 , 3 , 1 )[0] lowercase__ : Any= 
Image.fromarray(np.uinta(snake_case__ ) ).convert("RGB" ).resize((64, 64) ) lowercase__ : Union[str, Any]= { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": image, "control_image": control_image, } return inputs def UpperCAmelCase_ ( self ): '''simple docstring''' return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def UpperCAmelCase_ ( self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 ) def UpperCAmelCase_ ( self ): '''simple docstring''' self._test_inference_batch_single_identical(expected_max_diff=2e-3 ) class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" __lowerCamelCase = StableDiffusionControlNetImgaImgPipeline __lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} __lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __lowerCamelCase = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def UpperCAmelCase_ ( self ): '''simple docstring''' torch.manual_seed(0 ) lowercase__ : Any= UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) torch.manual_seed(0 ) def init_weights(snake_case__ ): if isinstance(snake_case__ , torch.nn.Convad ): torch.nn.init.normal(m.weight ) m.bias.data.fill_(1.0 ) lowercase__ : Optional[int]= ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , 
conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(snake_case__ ) torch.manual_seed(0 ) lowercase__ : List[Any]= ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(snake_case__ ) torch.manual_seed(0 ) lowercase__ : Optional[int]= DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , ) torch.manual_seed(0 ) lowercase__ : Dict= AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) lowercase__ : Optional[int]= CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) lowercase__ : str= CLIPTextModel(snake_case__ ) lowercase__ : List[Any]= CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) lowercase__ : Optional[int]= MultiControlNetModel([controlneta, controlneta] ) lowercase__ : Dict= { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def UpperCAmelCase_ ( self , snake_case__ , snake_case__=0 ): '''simple docstring''' if str(snake_case__ ).startswith("mps" ): lowercase__ : Optional[int]= torch.manual_seed(snake_case__ ) else: lowercase__ : Optional[int]= torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) lowercase__ : Union[str, Any]= 2 lowercase__ : Dict= [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * 
controlnet_embedder_scale_factor) , generator=snake_case__ , device=torch.device(snake_case__ ) , ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=snake_case__ , device=torch.device(snake_case__ ) , ), ] lowercase__ : List[Any]= floats_tensor(control_image[0].shape , rng=random.Random(snake_case__ ) ).to(snake_case__ ) lowercase__ : List[Any]= image.cpu().permute(0 , 2 , 3 , 1 )[0] lowercase__ : Union[str, Any]= Image.fromarray(np.uinta(snake_case__ ) ).convert("RGB" ).resize((64, 64) ) lowercase__ : List[str]= { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": image, "control_image": control_image, } return inputs def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= self.get_dummy_components() lowercase__ : List[str]= self.pipeline_class(**snake_case__ ) pipe.to(snake_case__ ) lowercase__ : str= 10.0 lowercase__ : Optional[Any]= 4 lowercase__ : List[str]= self.get_dummy_inputs(snake_case__ ) lowercase__ : List[str]= steps lowercase__ : Dict= scale lowercase__ : Union[str, Any]= pipe(**snake_case__ )[0] lowercase__ : int= self.get_dummy_inputs(snake_case__ ) lowercase__ : Dict= steps lowercase__ : Any= scale lowercase__ : int= pipe(**snake_case__ , control_guidance_start=0.1 , control_guidance_end=0.2 )[0] lowercase__ : int= self.get_dummy_inputs(snake_case__ ) lowercase__ : List[str]= steps lowercase__ : List[str]= scale lowercase__ : int= pipe(**snake_case__ , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0] lowercase__ : Any= self.get_dummy_inputs(snake_case__ ) lowercase__ : Dict= steps lowercase__ : Any= scale lowercase__ : Optional[Any]= pipe(**snake_case__ , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a ) ) > 1e-3 assert 
np.sum(np.abs(output_a - output_a ) ) > 1e-3 assert np.sum(np.abs(output_a - output_a ) ) > 1e-3 def UpperCAmelCase_ ( self ): '''simple docstring''' return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def UpperCAmelCase_ ( self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 ) def UpperCAmelCase_ ( self ): '''simple docstring''' self._test_inference_batch_single_identical(expected_max_diff=2e-3 ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= self.get_dummy_components() lowercase__ : Any= self.pipeline_class(**snake_case__ ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(snake_case__ ) except NotImplementedError: pass @slow @require_torch_gpu class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" def UpperCAmelCase_ ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny" ) lowercase__ : List[Any]= StableDiffusionControlNetImgaImgPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , safety_checker=snake_case__ , controlnet=snake_case__ ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=snake_case__ ) lowercase__ : Any= torch.Generator(device="cpu" ).manual_seed(0 ) lowercase__ : Tuple= "evil space-punk bird" lowercase__ : Union[str, Any]= load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512) ) lowercase__ : List[str]= load_image( 
"https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512) ) lowercase__ : Dict= pipe( snake_case__ , snake_case__ , control_image=snake_case__ , generator=snake_case__ , output_type="np" , num_inference_steps=50 , strength=0.6 , ) lowercase__ : str= output.images[0] assert image.shape == (512, 512, 3) lowercase__ : Tuple= load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy" ) assert np.abs(expected_image - image ).max() < 9e-2
85
"""simple docstring""" import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, 
load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() a : Optional[Any] = { """bart""": ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), """bert""": ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-uncased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-cased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-base-cased-finetuned-mrpc""": ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """dpr""": ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), """gpt2""": ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlnet""": ( XLNetConfig, 
TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm""": ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm-roberta""": ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """transfo-xl""": ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """openai-gpt""": ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """roberta""": ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """layoutlm""": ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), """roberta-large-mnli""": ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """camembert""": ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """flaubert""": ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert""": ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert-base-distilled-squad""": ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert""": ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert-visual-feature-encoder""": ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """ctrl""": ( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """albert""": ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, 
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """t5""": ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """electra""": ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """wav2vec2""": ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def lowercase__(A , A , A , A , A=False , A=True ) ->Union[str, Any]: """simple docstring""" if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase__, lowercase__, lowercase__, lowercase__ : List[Any]= MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models ) lowercase__ : List[Any]= config_class.from_json_file(A ) lowercase__ : Any= True lowercase__ : List[str]= True print(f'''Building TensorFlow model from configuration: {config}''' ) lowercase__ : Optional[int]= model_class(A ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): lowercase__ : List[str]= cached_file( A , A , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: lowercase__ : Union[str, Any]= load_pytorch_checkpoint_in_tfa_model(A , A ) if compare_with_pt_model: lowercase__ : Any= tf_model(tf_model.dummy_inputs , training=A ) # build the network lowercase__ : Optional[Any]= torch.load(A , map_location="cpu" ) lowercase__ : Union[str, Any]= pt_model_class.from_pretrained( pretrained_model_name_or_path=A , config=A , state_dict=A ) with torch.no_grad(): lowercase__ : str= pt_model(**pt_model.dummy_inputs ) lowercase__ : Tuple= pto[0].numpy() lowercase__ : List[Any]= tfo[0].numpy() lowercase__ : Any= np.amax(np.abs(np_pt - np_tf ) ) print(f'''Max absolute difference between models outputs {diff}''' ) assert diff <= 2e-2, f'''Error, model absolute difference is 
>2e-2: {diff}''' # Save pytorch-model print(f'''Save TensorFlow model to {tf_dump_path}''' ) tf_model.save_weights(A , save_format="h5" ) def lowercase__(A , A , A=None , A=None , A=False , A=False , A=False , A=False , ) ->List[Any]: """simple docstring""" if args_model_type is None: lowercase__ : Tuple= list(MODEL_CLASSES.keys() ) else: lowercase__ : Optional[int]= [args_model_type] for j, model_type in enumerate(A , start=1 ): print("=" * 100 ) print(f''' Converting model type {j}/{len(A )}: {model_type}''' ) print("=" * 100 ) if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ : Optional[int]= MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: lowercase__ : int= list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: lowercase__ : Any= model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(A , A ) , start=1 ): print("-" * 100 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' ) continue lowercase__ : Any= model_shortcut_name elif only_convert_finetuned_models: print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''' ) continue print( f''' Converting checkpoint {i}/{len(A )}: {model_shortcut_name} - model_type {model_type}''' ) print("-" * 100 ) if config_shortcut_name in aws_config_map: lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models ) else: lowercase__ : Union[str, Any]= config_shortcut_name if model_shortcut_name in aws_model_maps: lowercase__ : str= cached_file(A , A , force_download=not use_cached_models ) else: lowercase__ : Any= model_shortcut_name if os.path.isfile(A ): lowercase__ : Dict= "converted_model" 
convert_pt_checkpoint_to_tf( model_type=A , pytorch_checkpoint_path=A , config_file=A , tf_dump_path=os.path.join(A , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=A , ) if remove_cached_files: os.remove(A ) os.remove(A ) if __name__ == "__main__": a : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file.""" ) parser.add_argument( """--model_type""", default=None, type=str, help=( F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """ """convert all the models from AWS.""" ), ) parser.add_argument( """--pytorch_checkpoint_path""", default=None, type=str, help=( """Path to the PyTorch checkpoint path or shortcut name to download from AWS. """ """If not given, will download and convert all the checkpoints from AWS.""" ), ) parser.add_argument( """--config_file""", default=None, type=str, help=( """The config json file corresponding to the pre-trained model. \n""" """This specifies the model architecture. 
If not given and """ """--pytorch_checkpoint_path is not given or is a shortcut name """ """use the configuration associated to the shortcut name on the AWS""" ), ) parser.add_argument( """--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions.""" ) parser.add_argument( """--use_cached_models""", action="""store_true""", help="""Use cached models if possible instead of updating to latest checkpoint versions.""", ) parser.add_argument( """--remove_cached_files""", action="""store_true""", help="""Remove pytorch models after conversion (save memory when converting in batches).""", ) parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""") a : List[str] = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
85
1
"""Download the Open Graph preview image (og:image) of a web page to disk."""
from datetime import datetime

import requests
from bs4 import BeautifulSoup  # NOTE(review): original imported from "bsa"; BeautifulSoup lives in bs4

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")

    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]

    image_data = requests.get(image_url).content
    # Timestamped filename so repeated downloads do not overwrite each other.
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
85
"""Lazy-import shim for the Wav2Vec2-with-LM processor module."""
from typing import TYPE_CHECKING

from ...utils import _LazyModule

# Maps submodule name -> list of public names it exports; consumed by _LazyModule.
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
    import sys

    # Replace this module object with a lazy proxy so submodules are only
    # imported on first attribute access (original discarded the proxy and
    # passed an undefined `_import_structure`).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
85
1
"""Perceiver model configuration and its ONNX export configuration."""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging

a : Dict = logging.get_logger(__name__)

a : Optional[int] = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}


class __UpperCAmelCase(PretrainedConfig):
    """Configuration for the Perceiver model family.

    Holds hyper-parameters for the core latent transformer plus the attributes
    used by the masked-LM, image-classification, optical-flow and multimodal
    autoencoding variants. Extra keyword arguments are forwarded to
    ``PretrainedConfig``.
    """

    __lowerCamelCase = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        # The original collapsed all parameters into duplicate `snake_case__`
        # names (a SyntaxError) and dropped `self.` from every assignment;
        # both are restored here from the assignment order and defaults.
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape


class __UpperCAmelCase(OnnxConfig):
    """ONNX export configuration for Perceiver.

    The original named all three members `UpperCAmelCase_`, so the first two
    silently shadowed each other; the ``OnnxConfig`` base class dispatches on
    ``inputs`` / ``atol_for_validation`` / ``generate_dummy_inputs``, restored
    here.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes of the exported graph inputs, per task.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating the ONNX export against PyTorch.
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union[PreTrainedTokenizerBase, FeatureExtractionMixin],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        """Build dummy text or image inputs for tracing, keyed as "inputs"."""
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            # Perceiver expects the tokenized ids under the "inputs" key.
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
85
"""Render the terms of the harmonic series 1 + 1/2 + 1/3 + ... + 1/n as strings."""


def lowercase__(A) -> list:
    """Return the first ``A`` terms of the harmonic series as strings.

    The first term is rendered "1" and every later term k is "1/k".
    An empty-string input yields an empty list. ``A`` may be anything
    ``int()`` accepts (a numeric string or an int).
    """
    if A == "":
        return []
    series: list = []
    for term in range(int(A)):
        # First element is the plain "1"; subsequent elements are fractions.
        series.append(f"1/{term + 1}" if series else "1")
    return series


if __name__ == "__main__":
    # Original bound the input to `a` but called undefined `harmonic_series(nth_term)`.
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(lowercase__(nth_term))
85
1
"""Convert an LXMERT TensorFlow checkpoint into a PyTorch checkpoint."""
import argparse

import torch

from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging

logging.set_verbosity_info()


def lowercase__(tf_checkpoint_path, config_file, pytorch_dump_path) -> None:
    """Build LxmertForPreTraining from ``config_file``, load the TF weights
    from ``tf_checkpoint_path`` into it, and save the state dict to
    ``pytorch_dump_path``.

    (The original declared three parameters all named ``A`` — a SyntaxError —
    restored here from the call site and each parameter's use.)
    """
    lowercase__ : Dict = None  # placeholder removed; see assignments below
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    # Original called undefined `convert_tf_checkpoint_to_pytorch`; the defined
    # entry point in this file is `lowercase__`.
    lowercase__(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
85
"""BigBird model configuration and its ONNX export configuration."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

a : int = logging.get_logger(__name__)

a : str = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class __UpperCAmelCase(PretrainedConfig):
    """Configuration for the BigBird model (block-sparse attention transformer).

    The special-token ids are forwarded to ``PretrainedConfig``; everything
    else is stored as a plain attribute. The original declared duplicate
    ``snake_case__`` parameters (a SyntaxError) and dropped ``self.`` from
    every assignment; both are restored from assignment order and defaults.
    """

    __lowerCamelCase = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class __UpperCAmelCase(OnnxConfig):
    """ONNX export configuration for BigBird.

    The ``OnnxConfig`` base class requires the ``inputs`` property override
    (the original named it ``UpperCAmelCase_`` and referenced an undefined
    ``dynamic_axis`` local).
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes of the exported graph inputs, per task.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
85
1
"""simple docstring""" from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, TransformeraDModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , ): '''simple docstring''' super().__init__() self.register_modules(transformer=snake_case__ , vae=snake_case__ , scheduler=snake_case__ ) # create a imagenet -> id dictionary for easier use lowercase__ : Union[str, Any]= {} if idalabel is not None: for key, value in idalabel.items(): for label in value.split("," ): lowercase__ : Optional[int]= int(snake_case__ ) lowercase__ : Optional[Any]= dict(sorted(self.labels.items() ) ) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ): lowercase__ : int= list(snake_case__ ) for l in label: if l not in self.labels: raise ValueError( F'''{l} does not exist. 
Please make sure to select one of the following labels: \n {self.labels}.''' ) return [self.labels[l] for l in label] @torch.no_grad() def __call__( self , snake_case__ , snake_case__ = 4.0 , snake_case__ = None , snake_case__ = 50 , snake_case__ = "pil" , snake_case__ = True , ): '''simple docstring''' lowercase__ : Optional[Any]= len(snake_case__ ) lowercase__ : Any= self.transformer.config.sample_size lowercase__ : List[Any]= self.transformer.config.in_channels lowercase__ : List[Any]= randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size) , generator=snake_case__ , device=self.device , dtype=self.transformer.dtype , ) lowercase__ : str= torch.cat([latents] * 2 ) if guidance_scale > 1 else latents lowercase__ : List[str]= torch.tensor(snake_case__ , device=self.device ).reshape(-1 ) lowercase__ : Optional[Any]= torch.tensor([1000] * batch_size , device=self.device ) lowercase__ : int= torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(snake_case__ ) for t in self.progress_bar(self.scheduler.timesteps ): if guidance_scale > 1: lowercase__ : Optional[Any]= latent_model_input[: len(snake_case__ ) // 2] lowercase__ : Tuple= torch.cat([half, half] , dim=0 ) lowercase__ : Optional[int]= self.scheduler.scale_model_input(snake_case__ , snake_case__ ) lowercase__ : Union[str, Any]= t if not torch.is_tensor(snake_case__ ): # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) lowercase__ : Any= latent_model_input.device.type == "mps" if isinstance(snake_case__ , snake_case__ ): lowercase__ : Tuple= torch.floataa if is_mps else torch.floataa else: lowercase__ : Tuple= torch.intaa if is_mps else torch.intaa lowercase__ : List[Any]= torch.tensor([timesteps] , dtype=snake_case__ , device=latent_model_input.device ) elif len(timesteps.shape ) == 0: lowercase__ : Dict= timesteps[None].to(latent_model_input.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML lowercase__ : Optional[int]= timesteps.expand(latent_model_input.shape[0] ) # predict noise model_output lowercase__ : Optional[Any]= self.transformer( snake_case__ , timestep=snake_case__ , class_labels=snake_case__ ).sample # perform guidance if guidance_scale > 1: lowercase__, lowercase__ : Optional[int]= noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] lowercase__, lowercase__ : Dict= torch.split(snake_case__ , len(snake_case__ ) // 2 , dim=0 ) lowercase__ : Optional[Any]= uncond_eps + guidance_scale * (cond_eps - uncond_eps) lowercase__ : Tuple= torch.cat([half_eps, half_eps] , dim=0 ) lowercase__ : Optional[int]= torch.cat([eps, rest] , dim=1 ) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: lowercase__, lowercase__ : Dict= torch.split(snake_case__ , snake_case__ , dim=1 ) else: lowercase__ : str= noise_pred # compute previous image: x_t -> x_t-1 lowercase__ : Any= self.scheduler.step(snake_case__ , snake_case__ , snake_case__ ).prev_sample if guidance_scale > 1: lowercase__, lowercase__ : Any= latent_model_input.chunk(2 , dim=0 ) else: lowercase__ : List[str]= latent_model_input lowercase__ : Any= 1 / self.vae.config.scaling_factor * latents lowercase__ : Dict= self.vae.decode(snake_case__ ).sample lowercase__ : Dict= (samples / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this 
does not cause significant overhead and is compatible with bfloat16 lowercase__ : Tuple= samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": lowercase__ : Tuple= self.numpy_to_pil(snake_case__ ) if not return_dict: return (samples,) return ImagePipelineOutput(images=snake_case__ )
85
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
85
1
"""simple docstring""" from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, flip_channel_order, get_resize_output_image_size, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging if is_vision_available(): import PIL if is_torch_available(): import torch a : Union[str, Any] = logging.get_logger(__name__) class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = ["pixel_values"] def __init__( self , snake_case__ = True , snake_case__ = None , snake_case__ = PILImageResampling.BILINEAR , snake_case__ = True , snake_case__ = 1 / 255 , snake_case__ = True , snake_case__ = None , snake_case__ = True , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) lowercase__ : Any= size if size is not None else {"shortest_edge": 224} lowercase__ : str= get_size_dict(snake_case__ , default_to_square=snake_case__ ) lowercase__ : Dict= crop_size if crop_size is not None else {"height": 256, "width": 256} lowercase__ : Optional[int]= get_size_dict(snake_case__ , param_name="crop_size" ) lowercase__ : Optional[Any]= do_resize lowercase__ : List[str]= size lowercase__ : Optional[int]= resample lowercase__ : Optional[int]= do_rescale lowercase__ : List[Any]= rescale_factor lowercase__ : int= do_center_crop lowercase__ : str= crop_size lowercase__ : Any= do_flip_channel_order def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = PIL.Image.BILINEAR , snake_case__ = None , **snake_case__ , ): '''simple docstring''' lowercase__ : Any= get_size_dict(snake_case__ , default_to_square=snake_case__ ) if "shortest_edge" not in size: raise 
ValueError(F'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''' ) lowercase__ : List[str]= get_resize_output_image_size(snake_case__ , size=size["shortest_edge"] , default_to_square=snake_case__ ) return resize(snake_case__ , size=snake_case__ , resample=snake_case__ , data_format=snake_case__ , **snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ , ): '''simple docstring''' lowercase__ : Any= get_size_dict(snake_case__ ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' ) return center_crop(snake_case__ , size=(size["height"], size["width"]) , data_format=snake_case__ , **snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ , ): '''simple docstring''' return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' return flip_channel_order(snake_case__ , data_format=snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = ChannelDimension.FIRST , **snake_case__ , ): '''simple docstring''' lowercase__ : List[str]= do_resize if do_resize is not None else self.do_resize lowercase__ : List[Any]= resample if resample is not None else self.resample lowercase__ : List[Any]= do_rescale if do_rescale is not None else self.do_rescale lowercase__ : str= rescale_factor if rescale_factor is not None else self.rescale_factor lowercase__ : Optional[Any]= do_center_crop if do_center_crop is not None else self.do_center_crop lowercase__ : List[str]= ( do_flip_channel_order if do_flip_channel_order is 
not None else self.do_flip_channel_order ) lowercase__ : Any= size if size is not None else self.size lowercase__ : Union[str, Any]= get_size_dict(snake_case__ , default_to_square=snake_case__ ) lowercase__ : str= crop_size if crop_size is not None else self.crop_size lowercase__ : Union[str, Any]= get_size_dict(snake_case__ , param_name="crop_size" ) lowercase__ : Dict= make_list_of_images(snake_case__ ) if not valid_images(snake_case__ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) # All transformations expect numpy arrays. lowercase__ : str= [to_numpy_array(snake_case__ ) for image in images] if do_resize: lowercase__ : Tuple= [self.resize(image=snake_case__ , size=snake_case__ , resample=snake_case__ ) for image in images] if do_center_crop: lowercase__ : List[str]= [self.center_crop(image=snake_case__ , size=snake_case__ ) for image in images] if do_rescale: lowercase__ : int= [self.rescale(image=snake_case__ , scale=snake_case__ ) for image in images] # the pretrained checkpoints assume images are BGR, not RGB if do_flip_channel_order: lowercase__ : List[Any]= [self.flip_channel_order(image=snake_case__ ) for image in images] lowercase__ : Optional[Any]= [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images] lowercase__ : Any= {"pixel_values": images} return BatchFeature(data=snake_case__ , tensor_type=snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' lowercase__ : Tuple= outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not 
None: if len(snake_case__ ) != len(snake_case__ ): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) if is_torch_tensor(snake_case__ ): lowercase__ : Any= target_sizes.numpy() lowercase__ : List[str]= [] for idx in range(len(snake_case__ ) ): lowercase__ : Optional[Any]= torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=snake_case__ ) lowercase__ : Dict= resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(snake_case__ ) else: lowercase__ : int= logits.argmax(dim=1 ) lowercase__ : Dict= [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
85
"""simple docstring""" from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def lowercase__(A , A ) ->List[Any]: """simple docstring""" lowercase__ : str= [] for part_id in partition_order: lowercase__ : int= df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect() for row_idx, row in enumerate(A ): expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->str: """simple docstring""" lowercase__ : Optional[Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Tuple= spark.range(100 ).repartition(1 ) lowercase__ : Dict= Spark(A ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=16 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 50 @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->Tuple: """simple docstring""" lowercase__ : Union[str, Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Dict= spark.range(10 ).repartition(2 ) lowercase__ : Optional[Any]= [1, 0] lowercase__ : List[str]= _generate_iterable_examples(A , A ) # Reverse the partitions. 
lowercase__ : int= _get_expected_row_ids_and_row_dicts_for_partition_order(A , A ) for i, (row_id, row_dict) in enumerate(generate_fn() ): lowercase__, lowercase__ : Any= expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->int: """simple docstring""" lowercase__ : int= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Dict= spark.range(10 ).repartition(1 ) lowercase__ : str= SparkExamplesIterable(A ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(A ): assert row_id == f'''0_{i}''' assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->str: """simple docstring""" lowercase__ : List[str]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : int= spark.range(30 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. 
with patch("numpy.random.Generator" ) as generator_mock: lowercase__ : Optional[Any]= lambda A : x.reverse() lowercase__ : Tuple= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [2, 1, 0] ) lowercase__ : List[str]= SparkExamplesIterable(A ).shuffle_data_sources(A ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(A ): lowercase__, lowercase__ : str= expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->Any: """simple docstring""" lowercase__ : Dict= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Union[str, Any]= spark.range(20 ).repartition(4 ) # Partitions 0 and 2 lowercase__ : Optional[int]= SparkExamplesIterable(A ).shard_data_sources(worker_id=0 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowercase__ : Union[str, Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [0, 2] ) for i, (row_id, row_dict) in enumerate(A ): lowercase__, lowercase__ : Tuple= expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 lowercase__ : Tuple= SparkExamplesIterable(A ).shard_data_sources(worker_id=1 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowercase__ : List[Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [1, 3] ) for i, (row_id, row_dict) in enumerate(A ): lowercase__, lowercase__ : Dict= expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->Tuple: """simple docstring""" lowercase__ : Any= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Tuple= spark.range(100 ).repartition(1 ) lowercase__ : Optional[int]= Spark(A ) # Choose a small max_shard_size for maximum partitioning. 
spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 100
85
1
"""simple docstring""" import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer a : Any = logging.getLogger(__name__) def lowercase__() ->Optional[int]: """simple docstring""" lowercase__ : List[Any]= argparse.ArgumentParser( description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." ) parser.add_argument( "--dataset_name" , type=A , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , ) parser.add_argument( "--dataset_config" , type=A , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." ) parser.add_argument( "--tokenizer_name_or_path" , type=A , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , ) parser.add_argument( "--shard_size" , type=A , default=1_000 , help="Number of entries to go in a single shard." , ) parser.add_argument("--split" , type=A , default="train" , choices=["train", "test", "validation"] ) parser.add_argument( "--limit" , default=A , type=A , help="Limit the number of shards (used for debugging)." , ) parser.add_argument( "--max_length" , type=A , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum" " sequence length that is a multiple of 8." , ) parser.add_argument( "--output_dir" , default="tf-tpu" , type=A , help="Output directory where the TFRecord shards will be saved. If the" " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord" " shards will be directly saved to a Google Cloud Storage bucket." 
, ) lowercase__ : List[str]= parser.parse_args() return args def lowercase__(A ) ->Any: """simple docstring""" def fn(A ): return tokenizer(examples["text"] ) return fn def lowercase__(A ) ->List[str]: """simple docstring""" lowercase__ : Optional[int]= [] for i in range(len(tokenized_data["input_ids"] ) ): lowercase__ : List[str]= { "input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ), "attention_mask": tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ), } lowercase__ : Dict= tf.train.Features(feature=A ) lowercase__ : int= tf.train.Example(features=A ) lowercase__ : Tuple= example.SerializeToString() records.append(A ) return records def lowercase__(A ) ->Dict: """simple docstring""" lowercase__ : Dict= datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: lowercase__ : Union[str, Any]= min(len(A ) , args.limit ) lowercase__ : List[str]= dataset.select(range(A ) ) print(f'''Limiting the dataset to {args.limit} entries.''' ) lowercase__ : List[str]= AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) lowercase__ : List[str]= os.path.join(args.output_dir , args.split ) if not os.path.exists(A ): os.makedirs(A ) else: lowercase__ : int= os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. lowercase__ : Any= tokenize_function(A ) lowercase__ : Tuple= dataset.map(A , batched=A , num_proc=4 , remove_columns=["text"] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. 
When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(A ): # Concatenate all texts. lowercase__ : List[str]= {k: sum(examples[k] , [] ) for k in examples.keys()} lowercase__ : int= len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 lowercase__ : Tuple= (total_length // args.max_length) * args.max_length # Split by chunks of max_len. lowercase__ : Optional[int]= { k: [t[i : i + args.max_length] for i in range(0 , A , args.max_length )] for k, t in concatenated_examples.items() } return result lowercase__ : List[str]= dataset_tokenized.map(A , batched=A , batch_size=1_000 , num_proc=4 ) lowercase__ : Union[str, Any]= 0 lowercase__ : Any= 0 for shard in range(0 , len(A ) , args.shard_size ): lowercase__ : Optional[int]= grouped_dataset[shard : shard + args.shard_size] lowercase__ : Optional[Any]= len(dataset_snapshot["input_ids"] ) lowercase__ : Optional[int]= os.path.join(A , f'''dataset-{shard_count}-{records_containing}.tfrecord''' ) lowercase__ : Optional[int]= get_serialized_examples(A ) with tf.io.TFRecordWriter(A ) as out_file: for i in range(len(A ) ): lowercase__ : Tuple= serialized_examples[i] out_file.write(A ) print("Wrote file {} containing {} records".format(A , A ) ) shard_count += 1 total_records += records_containing with open(f'''split-{args.split}-records-count.txt''' , "w" ) as f: print(f'''Total {args.split} records: {total_records}''' , file=A ) if __name__ == "__main__": a : Optional[Any] = parse_args() main(args)
85
"""simple docstring""" import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=2 , snake_case__=99 , snake_case__=0 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=12 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__="last" , snake_case__=None , snake_case__=None , ): '''simple docstring''' lowercase__ : Optional[int]= parent lowercase__ : Tuple= batch_size lowercase__ : Tuple= seq_length lowercase__ : str= is_training lowercase__ : str= use_input_lengths lowercase__ : Any= use_token_type_ids lowercase__ : List[Any]= use_labels lowercase__ : Optional[int]= gelu_activation lowercase__ : str= sinusoidal_embeddings lowercase__ : List[str]= causal lowercase__ : Any= asm lowercase__ : Optional[int]= n_langs lowercase__ : Union[str, Any]= vocab_size lowercase__ : int= n_special lowercase__ : Any= hidden_size lowercase__ : int= num_hidden_layers lowercase__ : 
List[str]= num_attention_heads lowercase__ : List[str]= hidden_dropout_prob lowercase__ : str= attention_probs_dropout_prob lowercase__ : Any= max_position_embeddings lowercase__ : List[Any]= type_vocab_size lowercase__ : int= type_sequence_label_size lowercase__ : Any= initializer_range lowercase__ : Optional[int]= num_labels lowercase__ : Union[str, Any]= num_choices lowercase__ : List[Any]= summary_type lowercase__ : Optional[int]= use_proj lowercase__ : int= scope def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : Dict= random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : Tuple= None if self.use_input_lengths: lowercase__ : List[Any]= ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowercase__ : Tuple= None if self.use_token_type_ids: lowercase__ : Any= ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowercase__ : str= None lowercase__ : Tuple= None lowercase__ : Dict= None if self.use_labels: lowercase__ : Optional[Any]= ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : Optional[Any]= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase__ : Tuple= ids_tensor([self.batch_size] , 2 ).float() lowercase__ : Tuple= ids_tensor([self.batch_size] , self.num_choices ) lowercase__ : List[Any]= self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def UpperCAmelCase_ ( self ): '''simple docstring''' return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , 
sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : Any= FlaubertModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : str= model(snake_case__ , lengths=snake_case__ , langs=snake_case__ ) lowercase__ : str= model(snake_case__ , langs=snake_case__ ) lowercase__ : Any= model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : str= FlaubertWithLMHeadModel(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Optional[Any]= model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : str= FlaubertForQuestionAnsweringSimple(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : List[str]= model(snake_case__ ) lowercase__ : Dict= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) 
) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : List[Any]= FlaubertForQuestionAnswering(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Dict= model(snake_case__ ) lowercase__ : Any= model( snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , p_mask=snake_case__ , ) lowercase__ : List[str]= model( snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , ) ((lowercase__), ) : Optional[Any]= result_with_labels.to_tuple() lowercase__ : Union[str, Any]= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ ) ((lowercase__), ) : List[Any]= result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : List[str]= FlaubertForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Optional[Any]= model(snake_case__ ) lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.loss.shape , () ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : List[Any]= self.num_labels lowercase__ : Union[str, Any]= FlaubertForTokenClassification(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : int= model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : int= self.num_choices lowercase__ : str= FlaubertForMultipleChoice(config=snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Dict= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : int= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : str= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : Any= model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.prepare_config_and_inputs() ( ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ) : Any= config_and_inputs lowercase__ : Tuple= { "input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths, "attention_mask": input_mask, } return config, inputs_dict @require_torch class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" __lowerCamelCase = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) __lowerCamelCase = ( { "feature-extraction": FlaubertModel, "fill-mask": FlaubertWithLMHeadModel, "question-answering": FlaubertForQuestionAnsweringSimple, "text-classification": FlaubertForSequenceClassification, "token-classification": FlaubertForTokenClassification, "zero-shot": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' lowercase__ : Tuple= super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": lowercase__ : List[Any]= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) lowercase__ : List[str]= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) return inputs_dict def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= FlaubertModelTester(self ) lowercase__ : List[str]= ConfigTester(self , config_class=snake_case__ , emb_dim=37 ) def UpperCAmelCase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ ) def UpperCAmelCase_ ( self ): 
'''simple docstring''' lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*snake_case__ ) @slow def UpperCAmelCase_ ( self ): '''simple docstring''' for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : List[str]= FlaubertModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) @slow @require_torch_gpu def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__, lowercase__ : Optional[Any]= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. if model_class == FlaubertForMultipleChoice: return lowercase__ : int= True lowercase__ : List[Any]= model_class(config=snake_case__ ) lowercase__ : str= self._prepare_for_class(snake_case__ , snake_case__ ) lowercase__ : Dict= torch.jit.trace( snake_case__ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(snake_case__ , os.path.join(snake_case__ , "traced_model.pt" ) ) lowercase__ : str= torch.jit.load(os.path.join(snake_case__ , "traced_model.pt" ) , map_location=snake_case__ ) loaded(inputs_dict["input_ids"].to(snake_case__ ) , inputs_dict["attention_mask"].to(snake_case__ ) ) @require_torch class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" ) lowercase__ : Tuple= torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) with torch.no_grad(): lowercase__ : Optional[int]= model(snake_case__ )[0] lowercase__ : Optional[int]= torch.Size((1, 11, 768) 
) self.assertEqual(output.shape , snake_case__ ) lowercase__ : Dict= torch.tensor( [[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
85
1
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : str= TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" ) lowercase__ : str= AutoTokenizer.from_pretrained("google/mt5-small" ) lowercase__ : Tuple= tokenizer("Hello there" , return_tensors="tf" ).input_ids lowercase__ : Optional[Any]= tokenizer("Hi I am" , return_tensors="tf" ).input_ids lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ ).loss lowercase__ : int= -tf.math.reduce_mean(snake_case__ ).numpy() lowercase__ : int= -21.22_81_68 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
85
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = 42 __lowerCamelCase = 42 __lowerCamelCase = None class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = 2 @register_to_config def __init__( self , snake_case__ = 0.02 , snake_case__ = 100 , snake_case__ = 1.0_07 , snake_case__ = 80 , snake_case__ = 0.05 , snake_case__ = 50 , ): '''simple docstring''' # standard deviation of the initial noise distribution lowercase__ : int= sigma_max # setable values lowercase__ : int= None lowercase__ : np.IntTensor= None lowercase__ : torch.FloatTensor= None # sigma(t_i) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' return sample def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' lowercase__ : List[Any]= num_inference_steps lowercase__ : Any= np.arange(0 , self.num_inference_steps )[::-1].copy() lowercase__ : Tuple= torch.from_numpy(snake_case__ ).to(snake_case__ ) lowercase__ : Union[str, Any]= [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] lowercase__ : int= torch.tensor(snake_case__ , dtype=torch.floataa , device=snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = None ): '''simple docstring''' if self.config.s_min <= sigma <= self.config.s_max: lowercase__ : Optional[Any]= min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 ) else: lowercase__ : str= 0 # sample eps ~ N(0, S_noise^2 * I) lowercase__ : List[Any]= self.config.s_noise * 
randn_tensor(sample.shape , generator=snake_case__ ).to(sample.device ) lowercase__ : str= sigma + gamma * sigma lowercase__ : Any= sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ): '''simple docstring''' lowercase__ : Union[str, Any]= sample_hat + sigma_hat * model_output lowercase__ : Optional[int]= (sample_hat - pred_original_sample) / sigma_hat lowercase__ : Optional[Any]= sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ): '''simple docstring''' lowercase__ : int= sample_prev + sigma_prev * model_output lowercase__ : Optional[int]= (sample_prev - pred_original_sample) / sigma_prev lowercase__ : Optional[Any]= sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' raise NotImplementedError()
85
1
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. a : Union[str, Any] = abspath(join(dirname(dirname(__file__)), """src""")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="""ignore""", category=FutureWarning) def lowercase__(A ) ->Optional[int]: """simple docstring""" from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(A ) def lowercase__(A ) ->List[Any]: """simple docstring""" from diffusers.utils.testing_utils import pytest_terminal_summary_main lowercase__ : str= terminalreporter.config.getoption("--make-reports" ) if make_reports: pytest_terminal_summary_main(A , id=A )
85
"""simple docstring""" from ....utils import logging a : List[str] = logging.get_logger(__name__) class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__( self , snake_case__ , snake_case__=None , snake_case__=2048 ): '''simple docstring''' lowercase__ : Dict= config.__dict__ lowercase__ : str= modal_hidden_size if num_labels: lowercase__ : List[str]= num_labels
85
1
"""simple docstring""" def lowercase__(A = 10 , A = 22 ) ->int: """simple docstring""" lowercase__ : List[str]= range(1 , A ) lowercase__ : int= range(1 , A ) return sum( 1 for power in powers for base in bases if len(str(base**power ) ) == power ) if __name__ == "__main__": print(F"""{solution(10, 22) = }""")
85
"""simple docstring""" import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def lowercase__(A ) ->int: """simple docstring""" lowercase__ : Optional[int]= [] embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''', f'''stage{idx}.patch_embed.proj.weight''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''', f'''stage{idx}.patch_embed.proj.bias''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''', f'''stage{idx}.patch_embed.norm.weight''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''', f'''stage{idx}.patch_embed.norm.bias''', ) ) return embed def lowercase__(A , A ) ->Any: """simple docstring""" lowercase__ : Any= [] attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''', ) ) attention_weights.append( ( 
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''', ) ) attention_weights.append( ( 
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''', ) ) attention_weights.append( ( 
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj.bias''', ) ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') ) return attention_weights def lowercase__(A ) ->List[Any]: """simple 
docstring""" lowercase__ : Dict= [] token.append((f'''cvt.encoder.stages.{idx}.cls_token''', "stage2.cls_token") ) return token def lowercase__() ->Union[str, Any]: """simple docstring""" lowercase__ : Dict= [] head.append(("layernorm.weight", "norm.weight") ) head.append(("layernorm.bias", "norm.bias") ) head.append(("classifier.weight", "head.weight") ) head.append(("classifier.bias", "head.bias") ) return head def lowercase__(A , A , A , A ) ->Optional[int]: """simple docstring""" lowercase__ : List[str]= "imagenet-1k-id2label.json" lowercase__ : List[str]= 1_000 lowercase__ : Tuple= "huggingface/label-files" lowercase__ : int= num_labels lowercase__ : int= json.load(open(cached_download(hf_hub_url(A , A , repo_type="dataset" ) ) , "r" ) ) lowercase__ : str= {int(A ): v for k, v in idalabel.items()} lowercase__ : Optional[int]= idalabel lowercase__ : Union[str, Any]= {v: k for k, v in idalabel.items()} lowercase__ : Tuple= CvtConfig(num_labels=A , idalabel=A , labelaid=A ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit("/" , 1 )[-1][4:6] == "13": lowercase__ : int= [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit("/" , 1 )[-1][4:6] == "21": lowercase__ : Union[str, Any]= [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: lowercase__ : Optional[Any]= [2, 2, 20] lowercase__ : Optional[Any]= [3, 12, 16] lowercase__ : List[str]= [192, 768, 1_024] lowercase__ : List[str]= CvtForImageClassification(A ) lowercase__ : Any= AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" ) lowercase__ : Dict= image_size lowercase__ : int= torch.load(A , map_location=torch.device("cpu" ) ) lowercase__ : Optional[Any]= OrderedDict() lowercase__ : Tuple= [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: lowercase__ : Optional[int]= list_of_state_dict + cls_token(A ) lowercase__ : List[str]= list_of_state_dict + embeddings(A ) for cnt in range(config.depth[idx] ): lowercase__ : Dict= 
list_of_state_dict + attention(A , A ) lowercase__ : Optional[Any]= list_of_state_dict + final() for gg in list_of_state_dict: print(A ) for i in range(len(A ) ): lowercase__ : str= original_weights[list_of_state_dict[i][1]] model.load_state_dict(A ) model.save_pretrained(A ) image_processor.save_pretrained(A ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": a : Optional[int] = argparse.ArgumentParser() parser.add_argument( """--cvt_model""", default="""cvt-w24""", type=str, help="""Name of the cvt model you'd like to convert.""", ) parser.add_argument( """--image_size""", default=384, type=int, help="""Input Image Size""", ) parser.add_argument( """--cvt_file_name""", default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""", type=str, help="""Input Image Size""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) a : Optional[int] = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
85
1