Dataset schema:

  code:                     string  (length 87 to 55.2k)
  code_codestyle:           int64   (0 to 349)
  style_context:            string  (length 135 to 49.1k)
  style_context_codestyle:  int64   (0 to 349)
  label:                    int64   (0 to 1)

Row 1
"""simple docstring""" from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class A_ : '''simple docstring''' def __init__( self , lowercase_ , ): """simple docstring""" UpperCAmelCase_ : Optional[int] = parent UpperCAmelCase_ : Any = 13 UpperCAmelCase_ : Optional[int] = 7 UpperCAmelCase_ : Tuple = True UpperCAmelCase_ : int = True UpperCAmelCase_ : Any = True UpperCAmelCase_ : Optional[int] = 99 UpperCAmelCase_ : Tuple = 32 UpperCAmelCase_ : Union[str, Any] = 2 UpperCAmelCase_ : Optional[Any] = 4 UpperCAmelCase_ : List[str] = 37 UpperCAmelCase_ : Optional[int] = "gelu" UpperCAmelCase_ : Union[str, Any] = 0.1 UpperCAmelCase_ : Optional[int] = 0.1 UpperCAmelCase_ : Dict = 512 UpperCAmelCase_ : str = 16 UpperCAmelCase_ : Any = 2 UpperCAmelCase_ : int = 0.02 UpperCAmelCase_ : str = 3 UpperCAmelCase_ : Optional[Any] = 4 UpperCAmelCase_ : List[str] = None def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : str = None if self.use_input_mask: UpperCAmelCase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ : List[str] = None UpperCAmelCase_ : Tuple = None UpperCAmelCase_ : Optional[Any] = None if self.use_labels: UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ : Dict = EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ ( self ): """simple docstring""" ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Union[str, Any] = self.prepare_config_and_inputs() UpperCAmelCase_ : List[str] = True UpperCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = TFEsmModel(config=lowercase_ ) UpperCAmelCase_ : List[Any] = 
{"input_ids": input_ids, "attention_mask": input_mask} UpperCAmelCase_ : Union[str, Any] = model(lowercase_ ) UpperCAmelCase_ : Optional[Any] = [input_ids, input_mask] UpperCAmelCase_ : Tuple = model(lowercase_ ) UpperCAmelCase_ : List[str] = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ): """simple docstring""" UpperCAmelCase_ : Dict = True UpperCAmelCase_ : str = TFEsmModel(config=lowercase_ ) UpperCAmelCase_ : str = { "input_ids": input_ids, "attention_mask": input_mask, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, } UpperCAmelCase_ : Any = model(lowercase_ ) UpperCAmelCase_ : Optional[Any] = [input_ids, input_mask] UpperCAmelCase_ : str = model(lowercase_ , encoder_hidden_states=lowercase_ ) # Also check the case where encoder outputs are not passed UpperCAmelCase_ : Optional[int] = model(lowercase_ , attention_mask=lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Dict = TFEsmForMaskedLM(config=lowercase_ ) UpperCAmelCase_ : List[Any] = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : List[Any] = self.num_labels UpperCAmelCase_ : Union[str, Any] = TFEsmForTokenClassification(config=lowercase_ ) UpperCAmelCase_ : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask} UpperCAmelCase_ : List[str] = model(lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : List[str] = config_and_inputs UpperCAmelCase_ : str = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class A_ (lowercase__ ,lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) SCREAMING_SNAKE_CASE__ : Optional[Any] = ( { """feature-extraction""": TFEsmModel, """fill-mask""": TFEsmForMaskedLM, """text-classification""": TFEsmForSequenceClassification, """token-classification""": TFEsmForTokenClassification, """zero-shot""": TFEsmForSequenceClassification, } if is_tf_available() else {} ) SCREAMING_SNAKE_CASE__ : int = False SCREAMING_SNAKE_CASE__ : List[str] = False def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = TFEsmModelTester(self ) UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=lowercase_ , hidden_size=37 ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowercase_ ) @slow def UpperCamelCase__ ( self ): """simple docstring""" for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Optional[Any] = TFEsmModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) @unittest.skip("Protein models do not support embedding resizing." ) def UpperCamelCase__ ( self ): """simple docstring""" pass @unittest.skip("Protein models do not support embedding resizing." ) def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Union[str, Any] = model_class(lowercase_ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer UpperCAmelCase_ : List[str] = model.get_bias() assert isinstance(lowercase_ , lowercase_ ) for k, v in name.items(): assert isinstance(lowercase_ , tf.Variable ) else: UpperCAmelCase_ : Union[str, Any] = model.get_output_embeddings() assert x is None UpperCAmelCase_ : Optional[int] = model.get_bias() assert name is None @require_tf class A_ (unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" ) UpperCAmelCase_ : int = tf.constant([[0, 1, 2, 3, 4, 5]] ) UpperCAmelCase_ : Dict = model(lowercase_ )[0] UpperCAmelCase_ : List[Any] = [1, 6, 33] self.assertEqual(list(output.numpy().shape ) , lowercase_ ) # compare the actual values for a slice. UpperCAmelCase_ : Tuple = tf.constant( [ [ [8.92_15_18, -10.58_98_14, -6.4_67_13_07], [-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15], [-7.78_12_47, -13.95_15_57, -3.74_05_92], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) ) @slow def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" ) UpperCAmelCase_ : Optional[int] = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) UpperCAmelCase_ : Union[str, Any] = model(lowercase_ )[0] # compare the actual values for a slice. UpperCAmelCase_ : Tuple = tf.constant( [ [ [0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39], [0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22], [0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
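The `create_and_check_model` check above exercises the three calling conventions Keras-style Transformers models support: a dict of named tensors, a positional list, and a bare tensor. A standalone sketch of that dispatch pattern (the toy layer below is illustrative only, not the ESM model):

import tensorflow as tf

class ToyModel(tf.keras.layers.Layer):
    # Accept a dict of named tensors, a positional list, or a bare tensor.
    def call(self, inputs, attention_mask=None):
        if isinstance(inputs, dict):
            input_ids, attention_mask = inputs["input_ids"], inputs.get("attention_mask")
        elif isinstance(inputs, (list, tuple)):
            input_ids, attention_mask = inputs
        else:
            input_ids = inputs
        return tf.cast(input_ids, tf.float32)

model = ToyModel()
ids = tf.constant([[1, 2, 3]])
mask = tf.constant([[1, 1, 1]])
assert model({"input_ids": ids, "attention_mask": mask}).shape == (1, 3)
assert model([ids, mask]).shape == (1, 3)
assert model(ids).shape == (1, 3)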
code_codestyle: 61
style_context:

import argparse
import os
from pathlib import Path
from typing import Dict

import tensorflow as tf
import torch
from tqdm import tqdm

from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params


PATTERNS = [
    # replace left string with right string to get the relevant state_dict key (identical state dict to bart)
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    [".LayerNorm.gamma", "_layer_norm.weight"],
    [".LayerNorm.beta", "_layer_norm.bias"],
    ["r.layer_", "r.layers."],
    ["output_proj", "out_proj"],
    ["ffn.dense_1.", "fc2."],
    ["ffn.dense.", "fc1."],
    ["ffn_layer_norm", "final_layer_norm"],
    ["kernel", "weight"],
    ["encoder_layer_norm.", "encoder.layer_norm."],
    ["decoder_layer_norm.", "decoder.layer_norm."],
    ["embeddings.weights", "shared.weight"],
]


def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
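One detail worth calling out in `convert_pegasus` is the `v.T` transpose applied to dense/projection weights: TF stores Dense kernels as (in_features, out_features) while `torch.nn.Linear.weight` is (out_features, in_features). A minimal self-contained check of that convention:

import numpy as np
import torch

# TF-style kernel: (in_features, out_features)
tf_kernel = np.random.randn(16, 32).astype("float32")
linear = torch.nn.Linear(16, 32, bias=False)
with torch.no_grad():
    # PyTorch expects (out_features, in_features), hence the transpose
    linear.weight.copy_(torch.from_numpy(tf_kernel).T)

x = torch.randn(1, 16)
y_tf = x.numpy() @ tf_kernel
y_pt = linear(x).detach().numpy()
assert np.allclose(y_tf, y_pt, atol=1e-5)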
style_context_codestyle: 313
label: 0

Row 2
code:

import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock

import torch

from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config


# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
    "cross_validation.py",
    "gradient_accumulation.py",
    "local_sgd.py",
    "multi_process_metrics.py",
    "memory.py",
    "automatic_gradient_accumulation.py",
    "fsdp_with_peak_mem_tracking.py",
    "deepspeed_with_config_support.py",
    "megatron_lm_gpt_pretraining.py",
]


class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(self, complete_file_name, parser_only, secondary_filename=None, special_strings=None):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, secondary_filename, parser_only
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)


@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clear_on_instance = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
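The launcher tests above build an `accelerate launch` prefix once in `setUpClass` and shell out per test through accelerate's `run_command` helper. A rough equivalent with plain `subprocess` (the config path and script flags here are illustrative only):

import subprocess

launch_args = ["accelerate", "launch", "--config_file", "default_config.yml"]
testargs = ["examples/by_feature/checkpointing.py", "--checkpointing_steps", "epoch"]

# capture_output=True mirrors run_command(..., return_stdout=True)
result = subprocess.run(launch_args + testargs, capture_output=True, text=True)
print(result.stdout)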
code_codestyle: 62
style_context:

import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler_kwargs = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**scheduler_kwargs)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
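The tests above seed a CPU `torch.Generator` rather than the global RNG; that is what keeps the expected slices stable across runs and devices. A minimal demonstration of the pattern:

import torch

# Two generators with the same seed produce identical streams, independent
# of any other torch RNG usage in the process.
gen_a = torch.Generator(device="cpu").manual_seed(0)
gen_b = torch.Generator(device="cpu").manual_seed(0)
assert torch.equal(torch.randn(4, generator=gen_a), torch.randn(4, generator=gen_b))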
style_context_codestyle: 313
label: 0

Row 3
code:

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
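The `_LazyModule` swap at the bottom defers the heavy torch/flax imports until a symbol is actually accessed, while the `TYPE_CHECKING` branch keeps static analysis working. A much-simplified sketch of the idea (not the transformers implementation; `json`/`dumps` are just a demo mapping):

import importlib
import types


class LazyModule(types.ModuleType):
    # Resolve submodules only when one of their attributes is first used.
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, item):
        module = importlib.import_module(self._attr_to_module[item])
        return getattr(module, item)


lazy = LazyModule("demo", {"json": ["dumps"]})
print(lazy.dumps({"a": 1}))  # the json module is imported only at this call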
code_codestyle: 63
style_context:

import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD


torch.set_grad_enabled(False)


def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
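`read_in_q_k_v` splits the checkpoint's fused attention projection into separate query/key/value weights by slicing thirds along dim 0. The slicing logic in isolation:

import torch

# The fused projection stacks q, k and v along dim 0.
hidden_size = 8
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)

query = in_proj_weight[:hidden_size, :]
key = in_proj_weight[hidden_size : hidden_size * 2, :]
value = in_proj_weight[-hidden_size:, :]

# Re-stacking the slices recovers the original fused matrix.
assert torch.equal(torch.cat([query, key, value], dim=0), in_proj_weight)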
style_context_codestyle: 313
label: 0

Row 4
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : str , snake_case__ : list[str] ): """simple docstring""" _snake_case : int = """""" for word_or_phrase in separated: if not isinstance(snake_case__ , snake_case__ ): raise Exception("""join() accepts only strings to be joined""" ) joined += word_or_phrase + separator return joined.strip(snake_case__ ) if __name__ == "__main__": from doctest import testmod testmod()
code_codestyle: 64
style_context:

import csv

import tweepy

# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
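The manual `max_id` loop above can also be written with tweepy's `Cursor`, which handles the pagination bookkeeping itself; a sketch (credentials and the handle are placeholders):

import tweepy

auth = tweepy.OAuthHandler("consumer_key", "consumer_secret")
auth.set_access_token("access_key", "access_secret")
api = tweepy.API(auth)

# Cursor pages through the timeline transparently, 200 tweets per request.
alltweets = [
    tweet for tweet in tweepy.Cursor(api.user_timeline, screen_name="FirePing32", count=200).items()
]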
style_context_codestyle: 313
label: 0

Row 5
code:

import math


def perfect_square(num: int) -> bool:
    # float-based check: relies on math.sqrt returning an exact value
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    # integer binary search over candidate roots in [0, n]
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
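The float-based check can misreport very large inputs because `math.sqrt` rounds; `math.isqrt` (Python 3.8+) keeps the computation in integers. A sketch of that variant (the function name here is ours, not from the file above):

import math

def perfect_square_isqrt(n: int) -> bool:
    # math.isqrt is exact for arbitrarily large non-negative ints
    if n < 0:
        return False
    root = math.isqrt(n)
    return root * root == n

assert perfect_square_isqrt(16)
assert not perfect_square_isqrt(26)
assert perfect_square_isqrt((10**8 + 1) ** 2)  # large values stay exact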
code_codestyle: 65
style_context:

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
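The `inputs` property above declares which axes are dynamic (batch/sequence); the same shape of mapping is what `torch.onnx.export` consumes via its `dynamic_axes` argument. A toy illustration (the two-layer model below is purely a stand-in, not I-BERT):

import torch

model = torch.nn.Sequential(torch.nn.Embedding(100, 8), torch.nn.Linear(8, 2))
dummy_input = torch.randint(0, 100, (1, 6))

# Axis names mark which dimensions may vary at inference time.
torch.onnx.export(
    model,
    dummy_input,
    "toy_model.onnx",
    input_names=["input_ids"],
    output_names=["logits"],
    dynamic_axes={"input_ids": {0: "batch", 1: "sequence"}},
)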
style_context_codestyle: 313
label: 0

Row 6
"""simple docstring""" import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' _A : Union[str, Any] = MODEL_FOR_MASKED_LM_MAPPING _A : int = TF_MODEL_FOR_MASKED_LM_MAPPING def lowerCAmelCase_ ( self: Union[str, Any] ) -> Any: super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def lowerCAmelCase_ ( self: str ) -> Union[str, Any]: snake_case_ :Optional[Any] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" ) snake_case_ :str = unmasker("""My name is <mask>""" ) self.assertEqual( nested_simplify(snake_case , decimals=6 ) , [ {"""sequence""": """My name is grouped""", """score""": 2.1E-05, """token""": 38_015, """token_str""": """ grouped"""}, {"""sequence""": """My name is accuser""", """score""": 2.1E-05, """token""": 25_506, """token_str""": """ accuser"""}, ] , ) snake_case_ :List[Any] = unmasker("""The largest city in France is <mask>""" ) self.assertEqual( nested_simplify(snake_case , decimals=6 ) , [ { """sequence""": """The largest city in France is grouped""", """score""": 2.1E-05, """token""": 38_015, """token_str""": """ grouped""", }, { """sequence""": """The largest city in France is accuser""", """score""": 2.1E-05, """token""": 25_506, """token_str""": """ accuser""", }, ] , ) snake_case_ :Any = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 ) self.assertEqual( nested_simplify(snake_case , decimals=6 ) , [ {"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13_606, """token_str""": """ Clara"""}, {"""sequence""": """My name is Patrick""", """score""": 2E-05, """token""": 3_499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Te""", """score""": 1.9E-05, """token""": 2_941, """token_str""": """ Te"""}, ] , ) @require_torch def lowerCAmelCase_ ( self: Union[str, Any] ) -> Union[str, Any]: snake_case_ :List[Any] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" ) snake_case_ :Optional[int] = unmasker("""My name is <mask>""" ) self.assertEqual( nested_simplify(snake_case , decimals=6 ) , [ {"""sequence""": """My name is Maul""", """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul"""}, {"""sequence""": """My name isELS""", """score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS"""}, ] , ) snake_case_ :Union[str, Any] = unmasker("""The largest city in France is <mask>""" ) self.assertEqual( nested_simplify(snake_case , decimals=6 ) , [ { """sequence""": """The largest city in France is Maul""", """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul""", }, {"""sequence""": """The largest city in France isELS""", """score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS"""}, ] , ) snake_case_ :str = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 ) self.assertEqual( nested_simplify(snake_case , decimals=6 ) , [ {"""sequence""": """My name is Patrick""", 
"""score""": 2.1E-05, """token""": 3_499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Te""", """score""": 2E-05, """token""": 2_941, """token_str""": """ Te"""}, {"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13_606, """token_str""": """ Clara"""}, ] , ) snake_case_ :List[Any] = unmasker("""My name is <mask> <mask>""" , top_k=2 ) self.assertEqual( nested_simplify(snake_case , decimals=6 ) , [ [ { """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul""", """sequence""": """<s>My name is Maul<mask></s>""", }, {"""score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""}, ], [ { """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul""", """sequence""": """<s>My name is<mask> Maul</s>""", }, {"""score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""}, ], ] , ) @require_torch_gpu def lowerCAmelCase_ ( self: Any ) -> str: snake_case_ :Optional[int] = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" ) # convert model to fp16 pipe.model.half() snake_case_ :List[str] = pipe("""Paris is the [MASK] of France.""" ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(snake_case , snake_case ) @slow @require_torch def lowerCAmelCase_ ( self: Dict ) -> Dict: snake_case_ :Any = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" ) self.run_large_test(snake_case ) @slow @require_tf def lowerCAmelCase_ ( self: List[Any] ) -> Optional[int]: snake_case_ :Any = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" ) self.run_large_test(snake_case ) def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: List[Any] ) -> Union[str, Any]: snake_case_ :Optional[Any] = unmasker("""My name is <mask>""" ) self.assertEqual( nested_simplify(snake_case ) , [ {"""sequence""": """My name is John""", """score""": 0.0_0_8, """token""": 610, """token_str""": """ John"""}, {"""sequence""": """My name is Chris""", """score""": 0.0_0_7, """token""": 1_573, """token_str""": """ Chris"""}, ] , ) snake_case_ :List[Any] = unmasker("""The largest city in France is <mask>""" ) self.assertEqual( nested_simplify(snake_case ) , [ { """sequence""": """The largest city in France is Paris""", """score""": 0.2_5_1, """token""": 2_201, """token_str""": """ Paris""", }, { """sequence""": """The largest city in France is Lyon""", """score""": 0.2_1_4, """token""": 12_790, """token_str""": """ Lyon""", }, ] , ) snake_case_ :int = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 ) self.assertEqual( nested_simplify(snake_case ) , [ {"""sequence""": """My name is Patrick""", """score""": 0.0_0_5, """token""": 3_499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Clara""", """score""": 0.0_0_0, """token""": 13_606, """token_str""": """ Clara"""}, {"""sequence""": """My name is Te""", """score""": 0.0_0_0, """token""": 2_941, """token_str""": """ Te"""}, ] , ) @require_torch def lowerCAmelCase_ ( self: Dict ) -> Optional[int]: snake_case_ :str = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" ) snake_case_ :Any = None snake_case_ :Tuple = None 
self.run_pipeline_test(snake_case , [] ) @require_tf def lowerCAmelCase_ ( self: Optional[Any] ) -> Optional[int]: snake_case_ :int = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" ) snake_case_ :List[str] = None snake_case_ :List[Any] = None self.run_pipeline_test(snake_case , [] ) def lowerCAmelCase_ ( self: List[Any] , snake_case: int , snake_case: Tuple , snake_case: Optional[int] ) -> Any: if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" ) snake_case_ :Union[str, Any] = FillMaskPipeline(model=snake_case , tokenizer=snake_case ) snake_case_ :str = [ f"""This is another {tokenizer.mask_token} test""", ] return fill_masker, examples def lowerCAmelCase_ ( self: Any , snake_case: Optional[Any] , snake_case: Tuple ) -> Union[str, Any]: snake_case_ :Any = fill_masker.tokenizer snake_case_ :List[Any] = fill_masker.model snake_case_ :int = fill_masker( f"""This is a {tokenizer.mask_token}""" , ) self.assertEqual( snake_case , [ {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, ] , ) snake_case_ :Optional[int] = fill_masker([f"""This is a {tokenizer.mask_token}"""] ) self.assertEqual( snake_case , [ {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, ] , ) snake_case_ :Union[str, Any] = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] ) self.assertEqual( snake_case , [ [ {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, ], [ {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": 
ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, ], ] , ) with self.assertRaises(snake_case ): fill_masker([None] ) # No mask_token is not supported with self.assertRaises(snake_case ): fill_masker("""This is""" ) self.run_test_top_k(snake_case , snake_case ) self.run_test_targets(snake_case , snake_case ) self.run_test_top_k_targets(snake_case , snake_case ) self.fill_mask_with_duplicate_targets_and_top_k(snake_case , snake_case ) self.fill_mask_with_multiple_masks(snake_case , snake_case ) def lowerCAmelCase_ ( self: Any , snake_case: int , snake_case: int ) -> int: snake_case_ :List[str] = tokenizer.get_vocab() snake_case_ :Dict = sorted(vocab.keys() )[:2] # Pipeline argument snake_case_ :Any = FillMaskPipeline(model=snake_case , tokenizer=snake_case , targets=snake_case ) snake_case_ :Optional[Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" ) self.assertEqual( snake_case , [ {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, ] , ) snake_case_ :List[str] = {vocab[el] for el in targets} self.assertEqual({el["""token"""] for el in outputs} , snake_case ) snake_case_ :Optional[int] = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el["""token_str"""] for el in outputs} , set(snake_case ) ) # Call argument snake_case_ :int = FillMaskPipeline(model=snake_case , tokenizer=snake_case ) snake_case_ :Optional[int] = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=snake_case ) self.assertEqual( snake_case , [ {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, ] , ) snake_case_ :Tuple = {vocab[el] for el in targets} self.assertEqual({el["""token"""] for el in outputs} , snake_case ) snake_case_ :Tuple = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el["""token_str"""] for el in outputs} , set(snake_case ) ) # Score equivalence snake_case_ :Tuple = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=snake_case ) snake_case_ :Any = [top_mask["""token_str"""] for top_mask in outputs] snake_case_ :Union[str, Any] = [top_mask["""score"""] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(snake_case ) == set(snake_case ): snake_case_ :Optional[int] = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=snake_case ) snake_case_ :Union[str, Any] = [top_mask["""score"""] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(snake_case ) , nested_simplify(snake_case ) ) # Raises with invalid targets with self.assertRaises(snake_case ): snake_case_ :Union[str, Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[] ) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised if "" not in tokenizer.get_vocab(): with self.assertRaises(snake_case ): snake_case_ :int = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[""""""] ) with self.assertRaises(snake_case ): snake_case_ :Optional[Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets="""""" ) def lowerCAmelCase_ ( self: int , snake_case: List[str] , snake_case: List[str] ) -> Union[str, Any]: snake_case_ :Union[str, Any] = FillMaskPipeline(model=snake_case , tokenizer=snake_case , top_k=2 ) snake_case_ :str = fill_masker(f"""This is a {tokenizer.mask_token}""" ) self.assertEqual( snake_case , [ {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, ] , ) snake_case_ :Optional[int] = FillMaskPipeline(model=snake_case , tokenizer=snake_case ) snake_case_ :Optional[Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 ) self.assertEqual( snake_case , [ {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, ] , ) self.assertEqual(nested_simplify(snake_case ) , nested_simplify(snake_case ) ) def lowerCAmelCase_ ( self: Any , snake_case: List[str] , snake_case: List[Any] ) -> Tuple: snake_case_ :Tuple = tokenizer.get_vocab() snake_case_ :List[str] = FillMaskPipeline(model=snake_case , tokenizer=snake_case ) # top_k=2, ntargets=3 snake_case_ :List[Any] = sorted(vocab.keys() )[:3] snake_case_ :Any = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=snake_case ) # If we use the most probable targets, and filter differently, we should still # have the same results snake_case_ :str = [el["""token_str"""] for el in sorted(snake_case , key=lambda x : x["score"] , reverse=snake_case )] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(snake_case ).issubset(snake_case ): snake_case_ :Dict = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=snake_case ) # They should yield exactly the same result self.assertEqual(nested_simplify(snake_case ) , nested_simplify(snake_case ) ) def lowerCAmelCase_ ( self: Tuple , snake_case: Optional[int] , snake_case: Dict ) -> Tuple: snake_case_ :List[str] = FillMaskPipeline(model=snake_case , tokenizer=snake_case ) snake_case_ :Dict = tokenizer.get_vocab() # String duplicates + id duplicates snake_case_ :Optional[Any] = sorted(vocab.keys() )[:3] snake_case_ :Union[str, Any] = [targets[0], targets[1], targets[0], targets[2], targets[1]] snake_case_ :str = fill_masker(f"""My name is {tokenizer.mask_token}""" , targets=snake_case , top_k=10 ) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(snake_case ) , 3 ) def lowerCAmelCase_ ( self: str , snake_case: Optional[int] , snake_case: Dict ) -> int: snake_case_ :Union[str, Any] = FillMaskPipeline(model=snake_case , tokenizer=snake_case ) snake_case_ :Dict = fill_masker( f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 ) self.assertEqual( snake_case , [ [ {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, ], [ {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, ], [ {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, ], ] , )
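For quick reference, a minimal stand-alone sketch of the fill-mask behavior these tests assert on, reusing the tiny checkpoint from the TF test above; `top_k` and `targets` are the documented pipeline arguments, and network access is assumed for the download:

from transformers import pipeline

fill_masker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base")
mask = fill_masker.tokenizer.mask_token

# top_k caps the number of candidates returned per mask (the default is 5,
# which is why the assertions above expect five dicts).
for pred in fill_masker(f"This is a {mask}", top_k=2):
    print(pred["token_str"], round(pred["score"], 4))

# targets restricts scoring to a fixed candidate list; tokens absent from the
# vocabulary are skipped with a warning.
print(fill_masker(f"This is a {mask}", targets=["test", "line"]))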
66
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType a__ : Any = logging.get_logger(__name__) a__ : Dict = { '''openai/imagegpt-small''': '''''', '''openai/imagegpt-medium''': '''''', '''openai/imagegpt-large''': '''''', } class a_ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = 'imagegpt' __SCREAMING_SNAKE_CASE : Optional[Any] = ['past_key_values'] __SCREAMING_SNAKE_CASE : Union[str, Any] = { 'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , _lowerCamelCase=512 + 1 , _lowerCamelCase=32 * 32 , _lowerCamelCase=512 , _lowerCamelCase=24 , _lowerCamelCase=8 , _lowerCamelCase=None , _lowerCamelCase="quick_gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=1e-5 , _lowerCamelCase=0.0_2 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=False , **_lowerCamelCase , ) ->str: SCREAMING_SNAKE_CASE : Any = vocab_size SCREAMING_SNAKE_CASE : Optional[int] = n_positions SCREAMING_SNAKE_CASE : Optional[int] = n_embd SCREAMING_SNAKE_CASE : List[Any] = n_layer SCREAMING_SNAKE_CASE : List[Any] = n_head SCREAMING_SNAKE_CASE : int = n_inner SCREAMING_SNAKE_CASE : Dict = activation_function SCREAMING_SNAKE_CASE : Union[str, Any] = resid_pdrop SCREAMING_SNAKE_CASE : Dict = embd_pdrop SCREAMING_SNAKE_CASE : List[str] = attn_pdrop SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_epsilon SCREAMING_SNAKE_CASE : Tuple = initializer_range SCREAMING_SNAKE_CASE : int = scale_attn_weights SCREAMING_SNAKE_CASE : Optional[int] = use_cache SCREAMING_SNAKE_CASE : Optional[Any] = scale_attn_by_inverse_layer_idx SCREAMING_SNAKE_CASE : str = reorder_and_upcast_attn SCREAMING_SNAKE_CASE : List[str] = tie_word_embeddings super().__init__(tie_word_embeddings=_lowerCamelCase , **_lowerCamelCase ) class a_ ( a__ ): """simple docstring""" @property def __lowerCAmelCase ( self ) ->Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''sequence'''}), ] ) def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = 3 , _lowerCamelCase = 32 , _lowerCamelCase = 32 , ) ->Mapping[str, Any]: SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_images(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Tuple = dict(preprocessor(images=_lowerCamelCase , return_tensors=_lowerCamelCase ) ) return inputs
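A small sketch of this configuration through the public transformers API (the renamed class above corresponds to ImageGPTConfig); the defaults mirror the signature: 512 + 1 vocabulary entries (512 pixel clusters plus a start-of-sequence token) and 32 * 32 positions for a 32x32 image:

from transformers import ImageGPTConfig

config = ImageGPTConfig()  # defaults as in the signature above
print(config.vocab_size)   # 513
print(config.n_positions)  # 1024
# The attribute_map declared above lets generic names resolve to GPT-style ones:
print(config.hidden_size == config.n_embd)  # True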
313
0
'''simple docstring''' import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def __lowerCAmelCase ( UpperCamelCase__ ) -> Optional[int]: __lowerCamelCase = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', '''decoder.output_projection.weight''', ] for k in ignore_keys: state_dict.pop(UpperCamelCase__ , UpperCamelCase__ ) def __lowerCAmelCase ( UpperCamelCase__ ) -> Dict: __lowerCamelCase , __lowerCamelCase = emb.weight.shape __lowerCamelCase = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ ) __lowerCamelCase = emb.weight.data return lin_layer def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__="facebook/mbart-large-en-ro" , UpperCamelCase__=False , UpperCamelCase__=False ) -> List[str]: __lowerCamelCase = torch.load(UpperCamelCase__ , map_location='''cpu''' )['''model'''] remove_ignore_keys_(UpperCamelCase__ ) __lowerCamelCase = state_dict['''encoder.embed_tokens.weight'''].shape[0] __lowerCamelCase = MBartConfig.from_pretrained(UpperCamelCase__ , vocab_size=UpperCamelCase__ ) if mbart_aa and finetuned: __lowerCamelCase = '''relu''' __lowerCamelCase = state_dict['''decoder.embed_tokens.weight'''] __lowerCamelCase = MBartForConditionalGeneration(UpperCamelCase__ ) model.model.load_state_dict(UpperCamelCase__ ) if finetuned: __lowerCamelCase = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": __UpperCAmelCase =argparse.ArgumentParser() # Required parameters parser.add_argument( "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem." ) parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--hf_config", default="facebook/mbart-large-cc25", type=str, help="Which huggingface architecture to use: mbart-large", ) parser.add_argument("--mbart_aa", action="store_true", help="whether the model is an mBART-50 checkpoint") parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint") __UpperCAmelCase =parser.parse_args() __UpperCAmelCase =convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
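To see the two helpers above in isolation, a toy sketch: the key cleanup pops entries in place (the pop default keeps missing keys from raising), and the embedding-to-linear helper rebinds the embedding matrix as the output projection's weight. The dummy tensors are illustrative only:

import torch
from torch import nn

state_dict = {"encoder.version": torch.tensor(1.0), "decoder.weight": torch.zeros(2, 2)}
for k in ["encoder.version", "decoder.version"]:  # the second key is absent on purpose
    state_dict.pop(k, None)
print(sorted(state_dict))  # ['decoder.weight']

emb = nn.Embedding(10, 4)
vocab_size, emb_size = emb.weight.shape
lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
lin_layer.weight.data = emb.weight.data  # the projection now shares the embedding tensor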
67
from maths.prime_check import is_prime def UpperCAmelCase_( a__ ): """simple docstring""" if not isinstance(a__ , int ): SCREAMING_SNAKE_CASE : List[Any] = F"""Input value of [number={a__}] must be an integer""" raise TypeError(SCREAMING_SNAKE_CASE ) if is_prime(a__ ) and is_prime(a__ + 2 ): return a__ + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
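A quick sanity check of the twin-prime helper, with a stand-in is_prime so the snippet runs without the maths.prime_check module (the stand-in is an assumption, not that module's actual implementation):

def is_prime(n: int) -> bool:
    if n < 2:
        return False
    return all(n % d for d in range(2, int(n**0.5) + 1))

def twin_prime(number: int) -> int:
    # Same logic as the function above: return number + 2 if (number, number + 2)
    # is a twin-prime pair, else -1.
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    return -1

assert twin_prime(5) == 7   # (5, 7) is a twin-prime pair
assert twin_prime(7) == -1  # 9 = 7 + 2 is not prime
assert twin_prime(4) == -1  # 4 is not prime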
313
0
import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right lowerCAmelCase__ = 2_5_0_0_0_4 lowerCAmelCase__ = 2_5_0_0_2_0 @require_sentencepiece @require_tokenizers class a__ ( snake_case , unittest.TestCase ): """simple docstring""" __lowerCamelCase = MBartaaTokenizer __lowerCamelCase = MBartaaTokenizerFast __lowerCamelCase = True __lowerCamelCase = True def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing A__ = MBartaaTokenizer(lowercase , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=lowercase ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase ( self ) -> Any: '''simple docstring''' A__ = "<s>" A__ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase ) , lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase ) , lowercase ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' A__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(lowercase ) , 1054 ) def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1054 ) def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = MBartaaTokenizer(lowercase , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=lowercase ) A__ = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowercase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) A__ = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( lowercase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , ) A__ = tokenizer.convert_tokens_to_ids(lowercase ) self.assertListEqual( lowercase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) A__ = tokenizer.convert_ids_to_tokens(lowercase ) self.assertListEqual( lowercase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , ) @slow def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ = {"input_ids": [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase , model_name="facebook/mbart-large-50" , revision="d3913889c59cd5c9e456b269c376325eabad57e2" 
, ) def UpperCamelCase ( self ) -> str: '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return A__ = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): A__ = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase ) A__ = self.tokenizer_class.from_pretrained(lowercase , **lowercase ) A__ = tempfile.mkdtemp() A__ = tokenizer_r.save_pretrained(lowercase ) A__ = tokenizer_p.save_pretrained(lowercase ) # Checks it saves with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) A__ = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f ) self.assertSequenceEqual(lowercase , lowercase ) # Checks everything loads correctly in the same way A__ = tokenizer_r.from_pretrained(lowercase ) A__ = tokenizer_p.from_pretrained(lowercase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowercase , lowercase ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(lowercase ) # Save tokenizer rust, legacy_format=True A__ = tempfile.mkdtemp() A__ = tokenizer_r.save_pretrained(lowercase , legacy_format=lowercase ) A__ = tokenizer_p.save_pretrained(lowercase ) # Checks it saves with the same files self.assertSequenceEqual(lowercase , lowercase ) # Checks everything loads correctly in the same way A__ = tokenizer_r.from_pretrained(lowercase ) A__ = tokenizer_p.from_pretrained(lowercase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowercase , lowercase ) ) shutil.rmtree(lowercase ) # Save tokenizer rust, legacy_format=False A__ = tempfile.mkdtemp() A__ = tokenizer_r.save_pretrained(lowercase , legacy_format=lowercase ) A__ = tokenizer_p.save_pretrained(lowercase ) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way A__ = tokenizer_r.from_pretrained(lowercase ) A__ = tokenizer_p.from_pretrained(lowercase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowercase , lowercase ) ) shutil.rmtree(lowercase ) @require_torch @require_sentencepiece @require_tokenizers class a__ ( unittest.TestCase ): """simple docstring""" __lowerCamelCase = 'facebook/mbart-large-50-one-to-many-mmt' __lowerCamelCase = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] __lowerCamelCase = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de 
oameni.', ] __lowerCamelCase = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2] @classmethod def UpperCamelCase ( cls ) -> Tuple: '''simple docstring''' A__ = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" ) A__ = 1 return cls def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 250004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250020 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"] , 250038 ) def UpperCamelCase ( self ) -> Any: '''simple docstring''' A__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , lowercase ) def UpperCamelCase ( self ) -> int: '''simple docstring''' self.assertIn(lowercase , self.tokenizer.all_special_ids ) A__ = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] A__ = self.tokenizer.decode(lowercase , skip_special_tokens=lowercase ) A__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowercase ) self.assertEqual(lowercase , lowercase ) self.assertNotIn(self.tokenizer.eos_token , lowercase ) def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' A__ = ["this is gunna be a long sentence " * 20] assert isinstance(src_text[0] , lowercase ) A__ = 10 A__ = self.tokenizer(lowercase , max_length=lowercase , truncation=lowercase ).input_ids[0] self.assertEqual(ids[0] , lowercase ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(lowercase ) , lowercase ) def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [250053, 250001] ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' A__ = tempfile.mkdtemp() A__ = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowercase ) A__ = MBartaaTokenizer.from_pretrained(lowercase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowercase ) @require_torch def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' A__ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowercase , return_tensors="pt" ) A__ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE] @require_torch def UpperCamelCase ( self ) -> str: '''simple docstring''' A__ = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=lowercase , truncation=lowercase , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , ) A__ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) A__ = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , lowercase ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def UpperCamelCase ( self ) -> Union[str, Any]: 
'''simple docstring''' A__ = self.tokenizer(self.src_text , padding=lowercase , truncation=lowercase , max_length=3 , return_tensors="pt" ) A__ = self.tokenizer( text_target=self.tgt_text , padding=lowercase , truncation=lowercase , max_length=10 , return_tensors="pt" ) A__ = targets["input_ids"] A__ = shift_tokens_right(lowercase , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' A__ = self.tokenizer._build_translation_inputs( "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" ) self.assertEqual( nested_simplify(lowercase ) , { # en_XX, A, test, EOS "input_ids": [[250004, 62, 3034, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 250001, } , )
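A condensed sketch of the language-code handling these tests exercise, via the public tokenizer class (the renamed MBartaaTokenizer above corresponds to MBart50Tokenizer; the checkpoint download is assumed):

from transformers import MBart50TokenizerFast

tok = MBart50TokenizerFast.from_pretrained(
    "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# mBART-50 prefixes the source language code and appends </s>, matching the
# prefix_tokens / suffix_tokens assertions above.
assert batch.input_ids[0, 0].item() == tok.convert_tokens_to_ids("en_XX")
assert batch.input_ids[0, -1].item() == tok.eos_token_id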
68
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaPriorEmbaEmbPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class a_ ( a__ , unittest.TestCase ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = KandinskyVaaControlnetImgaImgPipeline __SCREAMING_SNAKE_CASE : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint'] __SCREAMING_SNAKE_CASE : List[Any] = ['image_embeds', 'negative_image_embeds', 'image', 'hint'] __SCREAMING_SNAKE_CASE : List[str] = [ 'generator', 'height', 'width', 'strength', 'guidance_scale', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] __SCREAMING_SNAKE_CASE : List[Any] = False @property def __lowerCAmelCase ( self ) ->Optional[Any]: return 32 @property def __lowerCAmelCase ( self ) ->Optional[int]: return 32 @property def __lowerCAmelCase ( self ) ->str: return self.time_input_dim @property def __lowerCAmelCase ( self ) ->Dict: return self.time_input_dim * 4 @property def __lowerCAmelCase ( self ) ->Tuple: return 100 @property def __lowerCAmelCase ( self ) ->int: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Union[str, Any] = { '''in_channels''': 8, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image_hint''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } SCREAMING_SNAKE_CASE : List[str] = UNetaDConditionModel(**_lowerCamelCase ) return model @property def __lowerCAmelCase ( self ) ->Any: return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __lowerCAmelCase ( self ) ->Tuple: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[int] = VQModel(**self.dummy_movq_kwargs ) return model def __lowerCAmelCase ( self ) ->List[str]: SCREAMING_SNAKE_CASE : str = self.dummy_unet SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_movq SCREAMING_SNAKE_CASE : List[str] = { '''num_train_timesteps''': 1000, '''beta_schedule''': '''linear''', '''beta_start''': 0.0_0_0_8_5, '''beta_end''': 0.0_1_2, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', '''thresholding''': False, } 
SCREAMING_SNAKE_CASE : str = DDIMScheduler(**_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = { '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->int: SCREAMING_SNAKE_CASE : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( _lowerCamelCase ) # create init_image SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) SCREAMING_SNAKE_CASE : int = image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE : Dict = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) ) # create hint SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) if str(_lowerCamelCase ).startswith('''mps''' ): SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(_lowerCamelCase ) else: SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Dict = { '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''hint''': hint, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def __lowerCAmelCase ( self ) ->Dict: SCREAMING_SNAKE_CASE : Optional[int] = '''cpu''' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**_lowerCamelCase ) SCREAMING_SNAKE_CASE : int = pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : str = output.images SCREAMING_SNAKE_CASE : Any = pipe( **self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0] SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) SCREAMING_SNAKE_CASE : Union[str, Any] = np.array( [0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class a_ ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self ) ->Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) ->Optional[int]: SCREAMING_SNAKE_CASE : int = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' ) SCREAMING_SNAKE_CASE : int = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) SCREAMING_SNAKE_CASE : Optional[Any] = init_image.resize((512, 512) ) 
SCREAMING_SNAKE_CASE : Dict = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/hint_image_cat.png''' ) SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(np.array(_lowerCamelCase ) ).float() / 2_5_5.0 SCREAMING_SNAKE_CASE : int = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) SCREAMING_SNAKE_CASE : List[Any] = '''A robot, 4k photo''' SCREAMING_SNAKE_CASE : List[str] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Tuple = KandinskyVaaControlnetImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : Any = pipeline.to(_lowerCamelCase ) pipeline.set_progress_bar_config(disable=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_prior( _lowerCamelCase , image=_lowerCamelCase , strength=0.8_5 , generator=_lowerCamelCase , negative_prompt='''''' , ).to_tuple() SCREAMING_SNAKE_CASE : List[str] = pipeline( image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , hint=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , ) SCREAMING_SNAKE_CASE : Any = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
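The slow test above condenses to a two-stage flow: the prior pipeline maps a prompt plus init image to image embeddings, and the controlnet img2img pipeline decodes them under the depth hint. A sketch using the public class names (KandinskyV22PriorEmb2EmbPipeline / KandinskyV22ControlnetImg2ImgPipeline); init_image and hint stand in for the inputs the test downloads, and a CUDA device is assumed:

import torch
from diffusers import (
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
)

prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
).to("cuda")
decoder = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
).to("cuda")

# init_image: a PIL image; hint: a (1, 3, H, W) depth map scaled to [0, 1].
image_embeds, negative_embeds = prior(
    "A robot, 4k photo", image=init_image, strength=0.85, negative_prompt=""
).to_tuple()
image = decoder(
    image=init_image, image_embeds=image_embeds, negative_image_embeds=negative_embeds,
    hint=hint, num_inference_steps=100, height=512, width=512, strength=0.5,
).images[0]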
313
0
"""simple docstring""" __UpperCamelCase = range(2, 20 + 1) __UpperCamelCase = [10**k for k in range(ks[-1] + 1)] __UpperCamelCase = {} def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]: snake_case_ = sum(a_i[j] for j in range(UpperCAmelCase , len(UpperCAmelCase ) ) ) snake_case_ = sum(a_i[j] * base[j] for j in range(min(len(UpperCAmelCase ) , UpperCAmelCase ) ) ) snake_case_ , snake_case_ = 0, 0 snake_case_ = n - i snake_case_ = memo.get(UpperCAmelCase ) if sub_memo is not None: snake_case_ = sub_memo.get(UpperCAmelCase ) if jumps is not None and len(UpperCAmelCase ) > 0: # find and make the largest jump without going over snake_case_ = -1 for _k in range(len(UpperCAmelCase ) - 1 , -1 , -1 ): if jumps[_k][2] <= k and jumps[_k][1] <= max_dn: snake_case_ = _k break if max_jump >= 0: snake_case_ , snake_case_ , snake_case_ = jumps[max_jump] # since the difference between jumps is cached, add c snake_case_ = diff + c for j in range(min(UpperCAmelCase , len(UpperCAmelCase ) ) ): snake_case_ , snake_case_ = divmod(UpperCAmelCase , 10 ) if new_c > 0: add(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) else: snake_case_ = [] else: snake_case_ = {c: []} snake_case_ = sub_memo if dn >= max_dn or c + diff >= base[k]: return diff, dn if k > ks[0]: while True: # keep doing smaller jumps snake_case_ , snake_case_ = next_term(UpperCAmelCase , k - 1 , i + dn , UpperCAmelCase ) diff += _diff dn += terms_jumped if dn >= max_dn or c + diff >= base[k]: break else: # would be too small a jump, just compute sequential terms instead snake_case_ , snake_case_ = compute(UpperCAmelCase , UpperCAmelCase , i + dn , UpperCAmelCase ) diff += _diff dn += terms_jumped snake_case_ = sub_memo[c] # keep jumps sorted by # of terms skipped snake_case_ = 0 while j < len(UpperCAmelCase ): if jumps[j][1] > dn: break j += 1 # cache the jump for this value digitsum(b) and c sub_memo[c].insert(UpperCAmelCase , (diff, dn, k) ) return (diff, dn) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: if i >= n: return 0, i if k > len(UpperCAmelCase ): a_i.extend([0 for _ in range(k - len(UpperCAmelCase ) )] ) # note: a_i -> b * 10^k + c # ds_b -> digitsum(b) # ds_c -> digitsum(c) snake_case_ = i snake_case_ , snake_case_ , snake_case_ = 0, 0, 0 for j in range(len(UpperCAmelCase ) ): if j >= k: ds_b += a_i[j] else: ds_c += a_i[j] while i < n: i += 1 snake_case_ = ds_c + ds_b diff += addend snake_case_ = 0 for j in range(UpperCAmelCase ): snake_case_ = a_i[j] + addend snake_case_ , snake_case_ = divmod(UpperCAmelCase , 10 ) ds_c += a_i[j] if addend > 0: break if addend > 0: add(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) return diff, i - start_i def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]: for j in range(UpperCAmelCase , len(UpperCAmelCase ) ): snake_case_ = digits[j] + addend if s >= 10: snake_case_ , snake_case_ = divmod(UpperCAmelCase , 10 ) snake_case_ = addend // 10 + quotient else: snake_case_ = s snake_case_ = addend // 10 if addend == 0: break while addend > 0: snake_case_ , snake_case_ = divmod(UpperCAmelCase , 10 ) digits.append(UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase = 10**15 ) -> int: snake_case_ = [1] snake_case_ = 1 snake_case_ = 0 while True: snake_case_ , snake_case_ = next_term(UpperCAmelCase , 20 , i + dn , UpperCAmelCase ) dn += terms_jumped if dn == n - i: break snake_case_ = 0 for j in range(len(UpperCAmelCase ) ): a_n += digits[j] * 10**j 
return a_n if __name__ == "__main__": print(F"""{solution() = }""")
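The memoized jump logic above accelerates the recurrence a(1) = 1, a(n+1) = a(n) + digitsum(a(n)); a naive reference for small n makes it easy to sanity-check:

def naive(n: int) -> int:
    # Direct O(n) evaluation of the digit-sum recurrence.
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a

# The sequence starts 1, 2, 4, 8, 16, 23, 28, ...
assert [naive(i) for i in range(1, 8)] == [1, 2, 4, 8, 16, 23, 28]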
69
from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker a__ : List[str] = '''CompVis/stable-diffusion-v1-1''' a__ : Optional[Any] = '''CompVis/stable-diffusion-v1-2''' a__ : Any = '''CompVis/stable-diffusion-v1-3''' a__ : Optional[Any] = '''CompVis/stable-diffusion-v1-4''' class a_ ( a__ ): """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = True , ) ->str: super().__init__() SCREAMING_SNAKE_CASE : Tuple = StableDiffusionPipeline.from_pretrained(_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionPipeline.from_pretrained(_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = StableDiffusionPipeline.from_pretrained(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Dict = StableDiffusionPipeline( vae=_lowerCamelCase , text_encoder=_lowerCamelCase , tokenizer=_lowerCamelCase , unet=_lowerCamelCase , scheduler=_lowerCamelCase , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , requires_safety_checker=_lowerCamelCase , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def __lowerCAmelCase ( self ) ->Dict[str, Any]: return {k: getattr(self , _lowerCamelCase ) for k in self.config.keys() if not k.startswith('''_''' )} def __lowerCAmelCase ( self , _lowerCamelCase = "auto" ) ->Optional[int]: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory SCREAMING_SNAKE_CASE : Optional[int] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(_lowerCamelCase ) def __lowerCAmelCase ( self ) ->List[str]: self.enable_attention_slicing(_lowerCamelCase ) @torch.no_grad() def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->str: return self.pipea( prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) @torch.no_grad() def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Tuple: return self.pipea( prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , 
num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) @torch.no_grad() def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Dict: return self.pipea( prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) @torch.no_grad() def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Optional[Any]: return self.pipea( prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) @torch.no_grad() def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Dict: SCREAMING_SNAKE_CASE : Optional[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu''' self.to(_lowerCamelCase ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" ) # Get first result from Stable Diffusion Checkpoint v1.1 SCREAMING_SNAKE_CASE : Optional[Any] = self.textaimg_sda_a( prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) # Get first result from Stable Diffusion Checkpoint v1.2 SCREAMING_SNAKE_CASE : Any = self.textaimg_sda_a( prompt=_lowerCamelCase , height=_lowerCamelCase , 
width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) # Get first result from Stable Diffusion Checkpoint v1.3 SCREAMING_SNAKE_CASE : Optional[int] = self.textaimg_sda_a( prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) # Get first result from Stable Diffusion Checkpoint v1.4 SCREAMING_SNAKE_CASE : str = self.textaimg_sda_a( prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
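The wrapper above fans a single call out to four checkpoints; the same comparison can be sketched with a plain loop over StableDiffusionPipeline, fixing the seed so the four results differ only by checkpoint (CUDA device and checkpoint downloads assumed):

import torch
from diffusers import StableDiffusionPipeline

checkpoints = [
    "CompVis/stable-diffusion-v1-1",
    "CompVis/stable-diffusion-v1-2",
    "CompVis/stable-diffusion-v1-3",
    "CompVis/stable-diffusion-v1-4",
]
images = []
for ckpt in checkpoints:
    pipe = StableDiffusionPipeline.from_pretrained(ckpt, torch_dtype=torch.float16).to("cuda")
    generator = torch.Generator(device="cuda").manual_seed(0)  # same seed per checkpoint
    images.append(pipe("an astronaut riding a horse", generator=generator).images[0])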
313
0
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class UpperCAmelCase ( unittest.TestCase ): @slow def lowercase__ ( self : int ) -> Tuple: _lowerCAmelCase = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" ) _lowerCAmelCase = tf.convert_to_tensor( [[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" _lowerCAmelCase = model(__snake_case )["""last_hidden_state"""] _lowerCAmelCase = tf.TensorShape((1, 10, 7_68) ) self.assertEqual(output.shape , __snake_case ) # compare the actual values for a slice. _lowerCAmelCase = tf.convert_to_tensor( [[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
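In practice the hard-coded ids in this integration test come from the matching tokenizer; a sketch of the same forward pass end to end (pairing the camembert-base tokenizer with this TF checkpoint is an assumption, and downloads are required):

from transformers import CamembertTokenizer, TFCamembertModel

tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

inputs = tokenizer("J'aime le camembert !", return_tensors="tf")
outputs = model(inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768)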
70
from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxCrossAttnUpBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, FlaxUpBlockaD, ) @flax.struct.dataclass class a_ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE : jnp.ndarray @flax_register_to_config class a_ ( nn.Module , a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = 32 __SCREAMING_SNAKE_CASE : int = 4 __SCREAMING_SNAKE_CASE : int = 4 __SCREAMING_SNAKE_CASE : Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) __SCREAMING_SNAKE_CASE : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") __SCREAMING_SNAKE_CASE : Union[bool, Tuple[bool]] = False __SCREAMING_SNAKE_CASE : Tuple[int] = (320, 640, 1280, 1280) __SCREAMING_SNAKE_CASE : int = 2 __SCREAMING_SNAKE_CASE : Union[int, Tuple[int]] = 8 __SCREAMING_SNAKE_CASE : Optional[Union[int, Tuple[int]]] = None __SCREAMING_SNAKE_CASE : int = 1280 __SCREAMING_SNAKE_CASE : float = 0.0 __SCREAMING_SNAKE_CASE : bool = False __SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa __SCREAMING_SNAKE_CASE : bool = True __SCREAMING_SNAKE_CASE : int = 0 __SCREAMING_SNAKE_CASE : bool = False def __lowerCAmelCase ( self , _lowerCamelCase ) ->FrozenDict: # init input tensors SCREAMING_SNAKE_CASE : List[Any] = (1, self.in_channels, self.sample_size, self.sample_size) SCREAMING_SNAKE_CASE : List[Any] = jnp.zeros(_lowerCamelCase , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE : Tuple = jnp.ones((1,) , dtype=jnp.intaa ) SCREAMING_SNAKE_CASE : List[Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = jax.random.split(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Tuple = {'''params''': params_rng, '''dropout''': dropout_rng} return self.init(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )["params"] def __lowerCAmelCase ( self ) ->int: SCREAMING_SNAKE_CASE : List[str] = self.block_out_channels SCREAMING_SNAKE_CASE : Optional[int] = block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( '''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. 
SCREAMING_SNAKE_CASE : List[str] = self.num_attention_heads or self.attention_head_dim # input SCREAMING_SNAKE_CASE : Optional[int] = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time SCREAMING_SNAKE_CASE : Tuple = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) SCREAMING_SNAKE_CASE : Dict = FlaxTimestepEmbedding(_lowerCamelCase , dtype=self.dtype ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.only_cross_attention if isinstance(_lowerCamelCase , _lowerCamelCase ): SCREAMING_SNAKE_CASE : str = (only_cross_attention,) * len(self.down_block_types ) if isinstance(_lowerCamelCase , _lowerCamelCase ): SCREAMING_SNAKE_CASE : str = (num_attention_heads,) * len(self.down_block_types ) # down SCREAMING_SNAKE_CASE : Optional[int] = [] SCREAMING_SNAKE_CASE : Optional[Any] = block_out_channels[0] for i, down_block_type in enumerate(self.down_block_types ): SCREAMING_SNAKE_CASE : str = output_channel SCREAMING_SNAKE_CASE : int = block_out_channels[i] SCREAMING_SNAKE_CASE : List[Any] = i == len(_lowerCamelCase ) - 1 if down_block_type == "CrossAttnDownBlock2D": SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxCrossAttnDownBlockaD( in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxDownBlockaD( in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = down_blocks # mid SCREAMING_SNAKE_CASE : int = FlaxUNetMidBlockaDCrossAttn( in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) # up SCREAMING_SNAKE_CASE : str = [] SCREAMING_SNAKE_CASE : str = list(reversed(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : int = list(reversed(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : int = list(reversed(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : List[str] = reversed_block_out_channels[0] for i, up_block_type in enumerate(self.up_block_types ): SCREAMING_SNAKE_CASE : Dict = output_channel SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i] SCREAMING_SNAKE_CASE : Tuple = reversed_block_out_channels[min(i + 1 , len(_lowerCamelCase ) - 1 )] SCREAMING_SNAKE_CASE : Dict = i == len(_lowerCamelCase ) - 1 if up_block_type == "CrossAttnUpBlock2D": SCREAMING_SNAKE_CASE : str = FlaxCrossAttnUpBlockaD( in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: SCREAMING_SNAKE_CASE : Optional[int] = FlaxUpBlockaD( in_channels=_lowerCamelCase , 
out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , ) up_blocks.append(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Dict = output_channel SCREAMING_SNAKE_CASE : Tuple = up_blocks # out SCREAMING_SNAKE_CASE : Any = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) SCREAMING_SNAKE_CASE : Any = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase = True , _lowerCamelCase = False , ) ->Union[FlaxUNetaDConditionOutput, Tuple]: # 1. time if not isinstance(_lowerCamelCase , jnp.ndarray ): SCREAMING_SNAKE_CASE : int = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(_lowerCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0: SCREAMING_SNAKE_CASE : List[str] = timesteps.astype(dtype=jnp.floataa ) SCREAMING_SNAKE_CASE : Optional[Any] = jnp.expand_dims(_lowerCamelCase , 0 ) SCREAMING_SNAKE_CASE : List[str] = self.time_proj(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = self.time_embedding(_lowerCamelCase ) # 2. pre-process SCREAMING_SNAKE_CASE : int = jnp.transpose(_lowerCamelCase , (0, 2, 3, 1) ) SCREAMING_SNAKE_CASE : List[Any] = self.conv_in(_lowerCamelCase ) # 3. down SCREAMING_SNAKE_CASE : Optional[int] = (sample,) for down_block in self.down_blocks: if isinstance(_lowerCamelCase , _lowerCamelCase ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = down_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train ) else: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = down_block(_lowerCamelCase , _lowerCamelCase , deterministic=not train ) down_block_res_samples += res_samples if down_block_additional_residuals is not None: SCREAMING_SNAKE_CASE : int = () for down_block_res_sample, down_block_additional_residual in zip( _lowerCamelCase , _lowerCamelCase ): down_block_res_sample += down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) SCREAMING_SNAKE_CASE : Dict = new_down_block_res_samples # 4. mid SCREAMING_SNAKE_CASE : Optional[Any] = self.mid_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train ) if mid_block_additional_residual is not None: sample += mid_block_additional_residual # 5. up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE : Optional[Any] = down_block_res_samples[-(self.layers_per_block + 1) :] SCREAMING_SNAKE_CASE : Optional[int] = down_block_res_samples[: -(self.layers_per_block + 1)] if isinstance(_lowerCamelCase , _lowerCamelCase ): SCREAMING_SNAKE_CASE : str = up_block( _lowerCamelCase , temb=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train , ) else: SCREAMING_SNAKE_CASE : Optional[int] = up_block(_lowerCamelCase , temb=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train ) # 6. post-process SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = nn.silu(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Dict = self.conv_out(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = jnp.transpose(_lowerCamelCase , (0, 3, 1, 2) ) if not return_dict: return (sample,) return FlaxUNetaDConditionOutput(sample=_lowerCamelCase )
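A minimal init/apply sketch for this Flax UNet via the public diffusers class (FlaxUNet2DConditionModel), using a deliberately tiny config; exact keyword names may vary across diffusers versions:

import jax
import jax.numpy as jnp
from diffusers import FlaxUNet2DConditionModel

unet = FlaxUNet2DConditionModel(
    sample_size=16,
    block_out_channels=(32, 64),
    down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
    up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
    layers_per_block=1,
    attention_head_dim=2,
    cross_attention_dim=32,
)
params = unet.init_weights(jax.random.PRNGKey(0))  # mirrors init_weights above

sample = jnp.zeros((1, 4, 16, 16))  # (batch, in_channels, height, width)
timesteps = jnp.ones((1,), dtype=jnp.int32)
encoder_hidden_states = jnp.zeros((1, 1, 32))
out = unet.apply({"params": params}, sample, timesteps, encoder_hidden_states)
print(out.sample.shape)  # (1, 4, 16, 16)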
313
0
import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, UNetaDConditionModel, VideoToVideoSDPipeline, ) from diffusers.utils import floats_tensor, is_xformers_available, skip_mps from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class __A ( a , unittest.TestCase ): """simple docstring""" UpperCamelCase__ : Optional[Any] =VideoToVideoSDPipeline UpperCamelCase__ : str =TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""} ) - {"""image""", """width""", """height"""} UpperCamelCase__ : str =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""} ) - {"""image"""} UpperCamelCase__ : List[Any] =PipelineTesterMixin.required_optional_params - {"""latents"""} UpperCamelCase__ : List[Any] =False # No `output_type`. UpperCamelCase__ : List[str] =frozenset( [ """num_inference_steps""", """generator""", """latents""", """return_dict""", """callback""", """callback_steps""", ] ) def __lowercase ( self ): """simple docstring""" torch.manual_seed(0 ) __UpperCamelCase : str =UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , ) __UpperCamelCase : List[Any] =DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , ) torch.manual_seed(0 ) __UpperCamelCase : Optional[Any] =AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) __UpperCamelCase : Optional[Any] =CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , ) __UpperCamelCase : List[str] =CLIPTextModel(lowerCamelCase__ ) __UpperCamelCase : Dict =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) __UpperCamelCase : List[Any] ={ 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, } return components def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=0 ): """simple docstring""" __UpperCamelCase : str =floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ ) if str(lowerCamelCase__ ).startswith('mps' ): __UpperCamelCase : Optional[int] =torch.manual_seed(lowerCamelCase__ ) else: __UpperCamelCase : Union[str, Any] =torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ ) __UpperCamelCase : Dict ={ 'prompt': 'A painting of a squirrel eating a burger', 'video': video, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'pt', } return inputs def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Any ='cpu' # 
ensure determinism for the device-dependent torch.Generator __UpperCamelCase : str =self.get_dummy_components() __UpperCamelCase : Any =VideoToVideoSDPipeline(**lowerCamelCase__ ) __UpperCamelCase : str =sd_pipe.to(lowerCamelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __UpperCamelCase : Tuple =self.get_dummy_inputs(lowerCamelCase__ ) __UpperCamelCase : Dict ='np' __UpperCamelCase : List[Any] =sd_pipe(**lowerCamelCase__ ).frames __UpperCamelCase : Optional[int] =frames[0][-3:, -3:, -1] assert frames[0].shape == (32, 32, 3) __UpperCamelCase : Union[str, Any] =np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __lowercase ( self ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCamelCase__ , expected_max_diff=5E-3 ) @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def __lowercase ( self ): """simple docstring""" pass @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def __lowercase ( self ): """simple docstring""" pass @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' ) def __lowercase ( self ): """simple docstring""" pass def __lowercase ( self ): """simple docstring""" return super().test_progress_bar() @slow @skip_mps class __A ( unittest.TestCase ): """simple docstring""" def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Optional[int] =VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL' , torch_dtype=torch.floataa ) pipe.enable_model_cpu_offload() # 10 frames __UpperCamelCase : str =torch.Generator(device='cpu' ).manual_seed(0 ) __UpperCamelCase : Union[str, Any] =torch.randn((1, 10, 3, 1024, 576) , generator=lowerCamelCase__ ) __UpperCamelCase : List[Any] =video.to('cuda' ) __UpperCamelCase : Union[str, Any] ='Spiderman is surfing' __UpperCamelCase : Tuple =pipe(lowerCamelCase__ , video=lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=3 , output_type='pt' ).frames __UpperCamelCase : List[str] =np.array([-1.0_458_984, -1.1_279_297, -0.9_663_086, -0.91_503_906, -0.75_097_656] ) assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
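`get_dummy_inputs` above branches on the device string because `torch.Generator` cannot be constructed for `mps`. The same seeding pattern, factored into a hypothetical helper:

import torch

def make_generator(device, seed=0):
    # torch.Generator(device="mps") is unsupported, so seed the global
    # CPU generator instead when running on Apple Silicon.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)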
71
import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class a_ ( a__ , a__ , a__ , unittest.TestCase ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = StableUnCLIPImgaImgPipeline __SCREAMING_SNAKE_CASE : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS __SCREAMING_SNAKE_CASE : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __SCREAMING_SNAKE_CASE : Any = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess __SCREAMING_SNAKE_CASE : Tuple = frozenset([] ) def __lowerCAmelCase ( self ) ->Union[str, Any]: SCREAMING_SNAKE_CASE : Optional[Any] = 32 SCREAMING_SNAKE_CASE : Tuple = embedder_hidden_size # image encoding components SCREAMING_SNAKE_CASE : int = CLIPImageProcessor(crop_size=32 , size=32 ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[int] = CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=_lowerCamelCase , projection_dim=_lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) ) # regular denoising components torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : int = StableUnCLIPImageNormalizer(embedding_dim=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Tuple = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=_lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowerCamelCase , layers_per_block=1 , upcast_attention=_lowerCamelCase , use_linear_projection=_lowerCamelCase , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[int] = DDIMScheduler( beta_schedule='''scaled_linear''' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type='''v_prediction''' , set_alpha_to_one=_lowerCamelCase , steps_offset=1 , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Dict = AutoencoderKL() SCREAMING_SNAKE_CASE : 
Optional[Any] = { # image encoding components '''feature_extractor''': feature_extractor, '''image_encoder''': image_encoder.eval(), # image noising components '''image_normalizer''': image_normalizer.eval(), '''image_noising_scheduler''': image_noising_scheduler, # regular denoising components '''tokenizer''': tokenizer, '''text_encoder''': text_encoder.eval(), '''unet''': unet.eval(), '''scheduler''': scheduler, '''vae''': vae.eval(), } return components def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 , _lowerCamelCase=True ) ->Optional[int]: if str(_lowerCamelCase ).startswith('''mps''' ): SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(_lowerCamelCase ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) if pil_image: SCREAMING_SNAKE_CASE : Any = input_image * 0.5 + 0.5 SCREAMING_SNAKE_CASE : int = input_image.clamp(0 , 1 ) SCREAMING_SNAKE_CASE : Union[str, Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() SCREAMING_SNAKE_CASE : List[str] = DiffusionPipeline.numpy_to_pil(_lowerCamelCase )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def __lowerCAmelCase ( self ) ->Tuple: SCREAMING_SNAKE_CASE : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : Tuple = StableUnCLIPImgaImgPipeline(**_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = sd_pipe.to(_lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_inputs(_lowerCamelCase ) inputs.update({'''image_embeds''': None} ) SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe(**_lowerCamelCase ).images SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE : Tuple = np.array([0.3_8_7_2, 0.7_2_2_4, 0.5_6_0_1, 0.4_7_4_1, 0.6_8_7_2, 0.5_8_1_4, 0.4_6_3_6, 0.3_8_6_7, 0.5_0_7_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowerCAmelCase ( self ) ->Tuple: SCREAMING_SNAKE_CASE : str = torch_device in ['''cpu''', '''mps'''] self._test_attention_slicing_forward_pass(test_max_difference=_lowerCamelCase ) def __lowerCAmelCase ( self ) ->List[Any]: SCREAMING_SNAKE_CASE : Tuple = torch_device in ['''cpu''', '''mps'''] self._test_inference_batch_single_identical(test_max_difference=_lowerCamelCase ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __lowerCAmelCase ( self ) ->Optional[int]: self._test_xformers_attention_forwardGenerator_pass(test_max_difference=_lowerCamelCase ) @slow @require_torch_gpu class a_ ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self ) ->int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) ->Tuple: SCREAMING_SNAKE_CASE : List[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' ) SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy( 
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' ) SCREAMING_SNAKE_CASE : List[Any] = StableUnCLIPImgaImgPipeline.from_pretrained( '''fusing/stable-unclip-2-1-l-img2img''' , torch_dtype=torch.floataa ) pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 ) SCREAMING_SNAKE_CASE : Tuple = pipe(_lowerCamelCase , '''anime turle''' , generator=_lowerCamelCase , output_type='''np''' ) SCREAMING_SNAKE_CASE : List[str] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase ) def __lowerCAmelCase ( self ) ->Optional[Any]: SCREAMING_SNAKE_CASE : List[str] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' ) SCREAMING_SNAKE_CASE : Optional[int] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' ) SCREAMING_SNAKE_CASE : List[str] = StableUnCLIPImgaImgPipeline.from_pretrained( '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa ) pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 ) SCREAMING_SNAKE_CASE : Tuple = pipe(_lowerCamelCase , '''anime turle''' , generator=_lowerCamelCase , output_type='''np''' ) SCREAMING_SNAKE_CASE : List[str] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase ) def __lowerCAmelCase ( self ) ->Any: SCREAMING_SNAKE_CASE : Union[str, Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() SCREAMING_SNAKE_CASE : str = StableUnCLIPImgaImgPipeline.from_pretrained( '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : Dict = pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() SCREAMING_SNAKE_CASE : Dict = pipe( _lowerCamelCase , '''anime turtle''' , num_inference_steps=2 , output_type='''np''' , ) SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
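Both slow tests above turn on the same two memory savers before sampling so the pipeline fits on a V100. A minimal standalone sketch, using the public diffusers class name (the test file refers to it through an obfuscated alias):

import torch
from diffusers import StableUnCLIPImg2ImgPipeline

pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
    "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
)
pipe.enable_attention_slicing()       # chunk the attention computation
pipe.enable_sequential_cpu_offload()  # keep idle sub-models off the GPU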
313
0
"""simple docstring""" from __future__ import annotations import time lowerCAmelCase__ = list[tuple[int, int]] lowerCAmelCase__ = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] lowerCAmelCase__ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right class __snake_case : def __init__( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Node | None ): """simple docstring""" _lowerCamelCase : List[Any] = pos_x _lowerCamelCase : List[Any] = pos_y _lowerCamelCase : str = (pos_y, pos_x) _lowerCamelCase : Tuple = goal_x _lowerCamelCase : int = goal_y _lowerCamelCase : int = parent class __snake_case : def __init__( self : Any , __lowerCAmelCase : tuple[int, int] , __lowerCAmelCase : tuple[int, int] ): """simple docstring""" _lowerCamelCase : Optional[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , __lowerCAmelCase ) _lowerCamelCase : List[str] = Node(goal[1] , goal[0] , goal[1] , goal[0] , __lowerCAmelCase ) _lowerCamelCase : int = [self.start] _lowerCamelCase : Optional[Any] = False def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" while self.node_queue: _lowerCamelCase : Dict = self.node_queue.pop(0 ) if current_node.pos == self.target.pos: _lowerCamelCase : Tuple = True return self.retrace_path(__lowerCAmelCase ) _lowerCamelCase : List[str] = self.get_successors(__lowerCAmelCase ) for node in successors: self.node_queue.append(__lowerCAmelCase ) if not self.reached: return [self.start.pos] return None def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : Node ): """simple docstring""" _lowerCamelCase : Union[str, Any] = [] for action in delta: _lowerCamelCase : List[str] = parent.pos_x + action[1] _lowerCamelCase : Optional[int] = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__lowerCAmelCase ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node(__lowerCAmelCase , __lowerCAmelCase , self.target.pos_y , self.target.pos_x , __lowerCAmelCase ) ) return successors def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Node | None ): """simple docstring""" _lowerCamelCase : Tuple = node _lowerCamelCase : Any = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) _lowerCamelCase : Optional[int] = current_node.parent path.reverse() return path class __snake_case : def __init__( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] ): """simple docstring""" _lowerCamelCase : Any = BreadthFirstSearch(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : Tuple = BreadthFirstSearch(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : int = False def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue: _lowerCamelCase : Optional[Any] = self.fwd_bfs.node_queue.pop(0 ) _lowerCamelCase : List[str] = self.bwd_bfs.node_queue.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: _lowerCamelCase : Optional[Any] = True return self.retrace_bidirectional_path( __lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : str = current_bwd_node _lowerCamelCase : Union[str, Any] = current_fwd_node _lowerCamelCase : str = { self.fwd_bfs: self.fwd_bfs.get_successors(__lowerCAmelCase ), self.bwd_bfs: 
self.bwd_bfs.get_successors(__lowerCAmelCase ), } for bfs in [self.fwd_bfs, self.bwd_bfs]: for node in successors[bfs]: bfs.node_queue.append(__lowerCAmelCase ) if not self.reached: return [self.fwd_bfs.start.pos] return None def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : Node , __lowerCAmelCase : Node ): """simple docstring""" _lowerCamelCase : Optional[Any] = self.fwd_bfs.retrace_path(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = self.bwd_bfs.retrace_path(__lowerCAmelCase ) bwd_path.pop() bwd_path.reverse() _lowerCamelCase : str = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] import doctest doctest.testmod() lowerCAmelCase__ = (0, 0) lowerCAmelCase__ = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) lowerCAmelCase__ = time.time() lowerCAmelCase__ = BreadthFirstSearch(init, goal) lowerCAmelCase__ = bfs.search() lowerCAmelCase__ = time.time() - start_bfs_time print('''Unidirectional BFS computation time : ''', bfs_time) lowerCAmelCase__ = time.time() lowerCAmelCase__ = BidirectionalBreadthFirstSearch(init, goal) lowerCAmelCase__ = bd_bfs.search() lowerCAmelCase__ = time.time() - start_bd_bfs_time print('''Bidirectional BFS computation time : ''', bd_bfs_time)
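Beyond the timing harness in `__main__`, a minimal interactive use of the two searchers; both read the module-level `grid` as a global, and `__main__` already refers to the classes (defined above under obfuscated aliases) as BreadthFirstSearch / BidirectionalBreadthFirstSearch:

path = BreadthFirstSearch((0, 0), (6, 6)).search()
# Expected shape: a list of (y, x) tuples from start to goal, beginning with
# (0, 0) and ending with (6, 6); only the start position if unreachable.
print(path)

bd_path = BidirectionalBreadthFirstSearch((0, 0), (6, 6)).search()
print(bd_path)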
72
from abc import ABC, abstractmethod from typing import List, Optional class a_ ( a__ ): """simple docstring""" def __init__( self ) ->List[str]: # test for the above condition self.test() def __lowerCAmelCase ( self ) ->List[str]: SCREAMING_SNAKE_CASE : int = 0 SCREAMING_SNAKE_CASE : Union[str, Any] = False while not completed: if counter == 1: self.reset() SCREAMING_SNAKE_CASE : List[Any] = self.advance() if not self.does_advance(_lowerCamelCase ): raise Exception( '''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.update(_lowerCamelCase ) counter += 1 if counter > 1_0000: raise Exception('''update() does not fulfill the constraint.''' ) if self.remaining() != 0: raise Exception('''Custom Constraint is not defined correctly.''' ) @abstractmethod def __lowerCAmelCase ( self ) ->Optional[int]: raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) @abstractmethod def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]: raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) @abstractmethod def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]: raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) @abstractmethod def __lowerCAmelCase ( self ) ->Optional[Any]: raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) @abstractmethod def __lowerCAmelCase ( self ) ->Union[str, Any]: raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) @abstractmethod def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->Any: raise NotImplementedError( F"""{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.""" ) class a_ ( a__ ): """simple docstring""" def __init__( self , _lowerCamelCase ) ->int: super(_lowerCamelCase , self ).__init__() if not isinstance(_lowerCamelCase , _lowerCamelCase ) or len(_lowerCamelCase ) == 0: raise ValueError(F"""`token_ids` has to be a non-empty list, but is {token_ids}.""" ) if any((not isinstance(_lowerCamelCase , _lowerCamelCase ) or token_id < 0) for token_id in token_ids ): raise ValueError(F"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" ) SCREAMING_SNAKE_CASE : Optional[Any] = token_ids SCREAMING_SNAKE_CASE : Union[str, Any] = len(self.token_ids ) SCREAMING_SNAKE_CASE : Any = -1 # the index of the currently fulfilled step SCREAMING_SNAKE_CASE : Any = False def __lowerCAmelCase ( self ) ->List[Any]: if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]: if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCamelCase )}""" ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[str]: if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCamelCase )}""" ) SCREAMING_SNAKE_CASE : str = False SCREAMING_SNAKE_CASE : Any = False SCREAMING_SNAKE_CASE : List[Any] = False if self.does_advance(_lowerCamelCase ): self.fulfilled_idx += 1 SCREAMING_SNAKE_CASE : str = True if self.fulfilled_idx == (self.seqlen - 1): SCREAMING_SNAKE_CASE : Any = True SCREAMING_SNAKE_CASE : Union[str, Any] = completed else: # failed to make progress. 
SCREAMING_SNAKE_CASE : Dict = True self.reset() return stepped, completed, reset def __lowerCAmelCase ( self ) ->List[Any]: SCREAMING_SNAKE_CASE : List[Any] = False SCREAMING_SNAKE_CASE : Union[str, Any] = 0 def __lowerCAmelCase ( self ) ->Any: return self.seqlen - (self.fulfilled_idx + 1) def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->Dict: SCREAMING_SNAKE_CASE : Any = PhrasalConstraint(self.token_ids ) if stateful: SCREAMING_SNAKE_CASE : Dict = self.seqlen SCREAMING_SNAKE_CASE : int = self.fulfilled_idx SCREAMING_SNAKE_CASE : Tuple = self.completed return new_constraint class a_ : """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase=True ) ->Dict: SCREAMING_SNAKE_CASE : Any = max([len(_lowerCamelCase ) for one in nested_token_ids] ) SCREAMING_SNAKE_CASE : List[str] = {} for token_ids in nested_token_ids: SCREAMING_SNAKE_CASE : Optional[Any] = root for tidx, token_id in enumerate(_lowerCamelCase ): if token_id not in level: SCREAMING_SNAKE_CASE : Any = {} SCREAMING_SNAKE_CASE : Tuple = level[token_id] if no_subsets and self.has_subsets(_lowerCamelCase , _lowerCamelCase ): raise ValueError( '''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is''' F""" {nested_token_ids}.""" ) SCREAMING_SNAKE_CASE : List[Any] = root def __lowerCAmelCase ( self , _lowerCamelCase ) ->int: SCREAMING_SNAKE_CASE : List[Any] = self.trie for current_token in current_seq: SCREAMING_SNAKE_CASE : int = start[current_token] SCREAMING_SNAKE_CASE : Optional[int] = list(start.keys() ) return next_tokens def __lowerCAmelCase ( self , _lowerCamelCase ) ->Dict: SCREAMING_SNAKE_CASE : Any = self.next_tokens(_lowerCamelCase ) return len(_lowerCamelCase ) == 0 def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]: SCREAMING_SNAKE_CASE : Any = list(root.values() ) if len(_lowerCamelCase ) == 0: return 1 else: return sum([self.count_leaves(_lowerCamelCase ) for nn in next_nodes] ) def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict: SCREAMING_SNAKE_CASE : List[str] = self.count_leaves(_lowerCamelCase ) return len(_lowerCamelCase ) != leaf_count class a_ ( a__ ): """simple docstring""" def __init__( self , _lowerCamelCase ) ->str: super(_lowerCamelCase , self ).__init__() if not isinstance(_lowerCamelCase , _lowerCamelCase ) or len(_lowerCamelCase ) == 0: raise ValueError(F"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" ) if any(not isinstance(_lowerCamelCase , _lowerCamelCase ) for token_ids in nested_token_ids ): raise ValueError(F"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" ) if any( any((not isinstance(_lowerCamelCase , _lowerCamelCase ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( F"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" ) SCREAMING_SNAKE_CASE : List[Any] = DisjunctiveTrie(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Any = nested_token_ids SCREAMING_SNAKE_CASE : Optional[int] = self.trie.max_height SCREAMING_SNAKE_CASE : Union[str, Any] = [] SCREAMING_SNAKE_CASE : Optional[int] = False def __lowerCAmelCase ( self ) ->int: SCREAMING_SNAKE_CASE : str = self.trie.next_tokens(self.current_seq ) if len(_lowerCamelCase ) == 0: return None else: return token_list def __lowerCAmelCase ( self , _lowerCamelCase ) ->Dict: if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type 
{type(_lowerCamelCase )}""" ) SCREAMING_SNAKE_CASE : List[str] = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def __lowerCAmelCase ( self , _lowerCamelCase ) ->Any: if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_lowerCamelCase )}""" ) SCREAMING_SNAKE_CASE : int = False SCREAMING_SNAKE_CASE : List[Any] = False SCREAMING_SNAKE_CASE : Union[str, Any] = False if self.does_advance(_lowerCamelCase ): self.current_seq.append(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = True else: SCREAMING_SNAKE_CASE : Dict = True self.reset() SCREAMING_SNAKE_CASE : Any = self.trie.reached_leaf(self.current_seq ) SCREAMING_SNAKE_CASE : List[Any] = completed return stepped, completed, reset def __lowerCAmelCase ( self ) ->Optional[Any]: SCREAMING_SNAKE_CASE : Any = False SCREAMING_SNAKE_CASE : List[Any] = [] def __lowerCAmelCase ( self ) ->Optional[Any]: if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->List[str]: SCREAMING_SNAKE_CASE : str = DisjunctiveConstraint(self.token_ids ) if stateful: SCREAMING_SNAKE_CASE : str = self.seqlen SCREAMING_SNAKE_CASE : int = self.current_seq SCREAMING_SNAKE_CASE : Optional[int] = self.completed return new_constraint class a_ : """simple docstring""" def __init__( self , _lowerCamelCase ) ->Union[str, Any]: SCREAMING_SNAKE_CASE : List[Any] = constraints # max # of steps required to fulfill a given constraint SCREAMING_SNAKE_CASE : str = max([c.seqlen for c in constraints] ) SCREAMING_SNAKE_CASE : List[str] = len(_lowerCamelCase ) SCREAMING_SNAKE_CASE : int = False self.init_state() def __lowerCAmelCase ( self ) ->int: SCREAMING_SNAKE_CASE : Any = [] SCREAMING_SNAKE_CASE : List[Any] = None SCREAMING_SNAKE_CASE : Tuple = [constraint.copy(stateful=_lowerCamelCase ) for constraint in self.constraints] def __lowerCAmelCase ( self ) ->str: SCREAMING_SNAKE_CASE : str = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def __lowerCAmelCase ( self ) ->Optional[int]: SCREAMING_SNAKE_CASE : Tuple = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" SCREAMING_SNAKE_CASE : Optional[int] = constraint.advance() if isinstance(_lowerCamelCase , _lowerCamelCase ): token_list.append(_lowerCamelCase ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): token_list.extend(_lowerCamelCase ) else: SCREAMING_SNAKE_CASE : List[str] = self.inprogress_constraint.advance() if isinstance(_lowerCamelCase , _lowerCamelCase ): token_list.append(_lowerCamelCase ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): token_list.extend(_lowerCamelCase ) if len(_lowerCamelCase ) == 0: return None else: return token_list def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]: self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.add(_lowerCamelCase ) # the entire list of constraints are fulfilled if self.completed: break def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[Any]: if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise ValueError(F"""`token_id` should be an 
`int`, but is `{token_id}`.""" ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = False, False if self.completed: SCREAMING_SNAKE_CASE : List[str] = True SCREAMING_SNAKE_CASE : Optional[int] = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.inprogress_constraint.update(_lowerCamelCase ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : Optional[int] = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) SCREAMING_SNAKE_CASE : str = None if len(self.pending_constraints ) == 0: # we're done! SCREAMING_SNAKE_CASE : Optional[Any] = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(_lowerCamelCase ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = pending_constraint.update(_lowerCamelCase ) if not stepped: raise Exception( '''`constraint.update(token_id)` is not yielding incremental progress, ''' '''even though `constraint.does_advance(token_id)` is true.''' ) if complete: self.complete_constraints.append(_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = None if not complete and stepped: SCREAMING_SNAKE_CASE : Optional[Any] = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". SCREAMING_SNAKE_CASE : Union[str, Any] = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. SCREAMING_SNAKE_CASE : str = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def __lowerCAmelCase ( self , _lowerCamelCase=True ) ->str: SCREAMING_SNAKE_CASE : Dict = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: SCREAMING_SNAKE_CASE : str = [ constraint.copy(stateful=_lowerCamelCase ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: SCREAMING_SNAKE_CASE : Optional[int] = self.inprogress_constraint.copy(stateful=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = [constraint.copy() for constraint in self.pending_constraints] return new_state
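The constraint classes above are consumed by constrained beam search via `generate(constraints=...)`. A hedged usage sketch with the public `PhrasalConstraint` export; the model choice and phrase are illustrative:

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PhrasalConstraint

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

# Force the token sequence for "sehr schnell" to appear in the output;
# constrained generation requires beam search (num_beams > 1).
force_ids = tokenizer("sehr schnell", add_special_tokens=False).input_ids
inputs = tokenizer("translate English to German: the car is very fast", return_tensors="pt")
out = model.generate(**inputs, constraints=[PhrasalConstraint(force_ids)], num_beams=4)
print(tokenizer.decode(out[0], skip_special_tokens=True))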
313
0
import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase : Union[str, Any] = CycleDiffusionPipeline _UpperCAmelCase : Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { '''negative_prompt''', '''height''', '''width''', '''negative_prompt_embeds''', } _UpperCAmelCase : int = PipelineTesterMixin.required_optional_params - {'''latents'''} _UpperCAmelCase : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} ) _UpperCAmelCase : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS _UpperCAmelCase : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS def lowerCAmelCase ( self : Optional[int]): torch.manual_seed(0) __lowerCamelCase : Tuple = UNetaDConditionModel( block_out_channels=(3_2, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=3_2 ,) __lowerCamelCase : Optional[int] = DDIMScheduler( beta_start=0.00085 ,beta_end=0.012 ,beta_schedule='scaled_linear' ,num_train_timesteps=1_0_0_0 ,clip_sample=SCREAMING_SNAKE_CASE__ ,set_alpha_to_one=SCREAMING_SNAKE_CASE__ ,) torch.manual_seed(0) __lowerCamelCase : str = AutoencoderKL( block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,) torch.manual_seed(0) __lowerCamelCase : List[Any] = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,) __lowerCamelCase : int = CLIPTextModel(SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') __lowerCamelCase : Tuple = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : List[str]=0): __lowerCamelCase : str = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(SCREAMING_SNAKE_CASE__)).to(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = image / 2 + 0.5 if str(SCREAMING_SNAKE_CASE__).startswith('mps'): __lowerCamelCase : List[str] = torch.manual_seed(SCREAMING_SNAKE_CASE__) else: __lowerCamelCase : Tuple = torch.Generator(device=SCREAMING_SNAKE_CASE__).manual_seed(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Tuple = { 'prompt': 'An astronaut riding an elephant', 'source_prompt': 'An astronaut riding a horse', 'image': image, 'generator': generator, 'num_inference_steps': 2, 'eta': 0.1, 'strength': 0.8, 'guidance_scale': 3, 'source_guidance_scale': 1, 'output_type': 'numpy', } return inputs def 
lowerCAmelCase ( self : Optional[Any]): __lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator __lowerCamelCase : Dict = self.get_dummy_components() __lowerCamelCase : Dict = CycleDiffusionPipeline(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[str] = pipe.to(SCREAMING_SNAKE_CASE__) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__) __lowerCamelCase : int = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = pipe(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : str = output.images __lowerCamelCase : Tuple = images[0, -3:, -3:, -1] assert images.shape == (1, 3_2, 3_2, 3) __lowerCamelCase : Optional[int] = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 @unittest.skipIf(torch_device != 'cuda' ,'This test requires a GPU') def lowerCAmelCase ( self : str): __lowerCamelCase : Any = self.get_dummy_components() for name, module in components.items(): if hasattr(SCREAMING_SNAKE_CASE__ ,'half'): __lowerCamelCase : Any = module.half() __lowerCamelCase : Union[str, Any] = CycleDiffusionPipeline(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : int = pipe.to(SCREAMING_SNAKE_CASE__) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Tuple = pipe(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : int = output.images __lowerCamelCase : Any = images[0, -3:, -3:, -1] assert images.shape == (1, 3_2, 3_2, 3) __lowerCamelCase : Optional[int] = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 @skip_mps def lowerCAmelCase ( self : Dict): return super().test_save_load_local() @unittest.skip('non-deterministic pipeline') def lowerCAmelCase ( self : Optional[Any]): return super().test_inference_batch_single_identical() @skip_mps def lowerCAmelCase ( self : Any): return super().test_dict_tuple_outputs_equivalent() @skip_mps def lowerCAmelCase ( self : Optional[Any]): return super().test_save_load_optional_components() @skip_mps def lowerCAmelCase ( self : Tuple): return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class A_ ( unittest.TestCase ): def lowerCAmelCase ( self : List[Any]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : Union[str, Any]): __lowerCamelCase : Tuple = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/cycle-diffusion/black_colored_car.png') __lowerCamelCase : Tuple = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy') __lowerCamelCase : Optional[int] = init_image.resize((5_1_2, 5_1_2)) __lowerCamelCase : Optional[int] = 'CompVis/stable-diffusion-v1-4' __lowerCamelCase : Tuple = DDIMScheduler.from_pretrained(SCREAMING_SNAKE_CASE__ ,subfolder='scheduler') __lowerCamelCase : Optional[int] = CycleDiffusionPipeline.from_pretrained( SCREAMING_SNAKE_CASE__ ,scheduler=SCREAMING_SNAKE_CASE__ ,safety_checker=SCREAMING_SNAKE_CASE__ ,torch_dtype=torch.floataa ,revision='fp16') pipe.to(SCREAMING_SNAKE_CASE__) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__) pipe.enable_attention_slicing() __lowerCamelCase : Union[str, Any] = 'A black colored car' __lowerCamelCase : Optional[int] = 'A blue colored car' 
__lowerCamelCase : List[Any] = torch.manual_seed(0) __lowerCamelCase : Union[str, Any] = pipe( prompt=SCREAMING_SNAKE_CASE__ ,source_prompt=SCREAMING_SNAKE_CASE__ ,image=SCREAMING_SNAKE_CASE__ ,num_inference_steps=1_0_0 ,eta=0.1 ,strength=0.85 ,guidance_scale=3 ,source_guidance_scale=1 ,generator=SCREAMING_SNAKE_CASE__ ,output_type='np' ,) __lowerCamelCase : List[Any] = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image).max() < 5E-1 def lowerCAmelCase ( self : Union[str, Any]): __lowerCamelCase : int = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/cycle-diffusion/black_colored_car.png') __lowerCamelCase : List[str] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy') __lowerCamelCase : List[str] = init_image.resize((5_1_2, 5_1_2)) __lowerCamelCase : int = 'CompVis/stable-diffusion-v1-4' __lowerCamelCase : int = DDIMScheduler.from_pretrained(SCREAMING_SNAKE_CASE__ ,subfolder='scheduler') __lowerCamelCase : Optional[Any] = CycleDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE__ ,scheduler=SCREAMING_SNAKE_CASE__ ,safety_checker=SCREAMING_SNAKE_CASE__) pipe.to(SCREAMING_SNAKE_CASE__) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__) pipe.enable_attention_slicing() __lowerCamelCase : str = 'A black colored car' __lowerCamelCase : List[str] = 'A blue colored car' __lowerCamelCase : int = torch.manual_seed(0) __lowerCamelCase : Tuple = pipe( prompt=SCREAMING_SNAKE_CASE__ ,source_prompt=SCREAMING_SNAKE_CASE__ ,image=SCREAMING_SNAKE_CASE__ ,num_inference_steps=1_0_0 ,eta=0.1 ,strength=0.85 ,guidance_scale=3 ,source_guidance_scale=1 ,generator=SCREAMING_SNAKE_CASE__ ,output_type='np' ,) __lowerCamelCase : List[Any] = output.images assert np.abs(image - expected_image).max() < 2E-2
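The fp16 test above casts every sub-module that exposes `.half()` before assembling the pipeline; the same pattern isolated as a hypothetical helper:

def to_fp16(components):
    # Modules gain .half(); tokenizers and other non-module entries
    # pass through unchanged.
    return {name: (m.half() if hasattr(m, "half") else m) for name, m in components.items()}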
73
import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def UpperCAmelCase_( a__=32 , a__=10 , a__=100 , a__=1_026 , a__=True , a__="data/tokenized_stories_train_wikitext103.jbl" , a__="igf_context_pairs.jbl" , ): """simple docstring""" set_seed(3 ) # generate train_data and objective_set SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = generate_datasets( a__ , a__ , number=a__ , min_len=1_026 , trim=a__ ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? SCREAMING_SNAKE_CASE : str = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' ) # load pretrained model SCREAMING_SNAKE_CASE : Dict = load_gpta('''gpt2''' ).to(a__ ) print('''computing perplexity on objective set''' ) SCREAMING_SNAKE_CASE : int = compute_perplexity(a__ , a__ , a__ ).item() print('''perplexity on objective set:''' , a__ ) # collect igf pairs and save to file demo.jbl collect_objective_set(a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def UpperCAmelCase_( a__ , a__=15 , a__=128 , a__=100 , a__="igf_model.pt" , ): """simple docstring""" set_seed(42 ) # Load pre-trained model SCREAMING_SNAKE_CASE : List[Any] = GPTaLMHeadModel.from_pretrained('''gpt2''' ) # Initialize secondary learner to use embedding weights of model SCREAMING_SNAKE_CASE : str = SecondaryLearner(a__ ) # Train secondary learner SCREAMING_SNAKE_CASE : Union[str, Any] = train_secondary_learner( a__ , a__ , max_epochs=a__ , batch_size=a__ , eval_freq=100 , igf_model_path=a__ , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def UpperCAmelCase_( a__ , a__ , a__ , a__=32 , a__=1_000 , a__=16 , a__=1.0 , a__=recopy_gpta , a__=None , a__=10 , a__="gpt2_finetuned.pt" , ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' ) SCREAMING_SNAKE_CASE : Optional[int] = RandomSampler(a__ ) SCREAMING_SNAKE_CASE : Dict = DataLoader(a__ , sampler=a__ ) SCREAMING_SNAKE_CASE : Tuple = max_steps // (len(a__ )) + 1 SCREAMING_SNAKE_CASE : int = 0 SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros((1, context_len) , dtype=torch.long , device=a__ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = recopy_model(a__ , a__ , a__ ) model.train() if secondary_learner is not None: secondary_learner.to(a__ ) secondary_learner.eval() SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Optional[int] = 0 SCREAMING_SNAKE_CASE : Optional[int] = [] SCREAMING_SNAKE_CASE : Tuple = [] # Compute the performance of the transformer model at the beginning SCREAMING_SNAKE_CASE : str = compute_perplexity(a__ , a__ , a__ ) test_perps.append(a__ ) print('''Test perplexity, step''' , a__ , ''':''' , a__ ) for epoch in range(int(a__ ) ): for step, example in enumerate(a__ ): torch.cuda.empty_cache() SCREAMING_SNAKE_CASE : Union[str, Any] = random.randint(0 , example.size(2 ) - context_len - 1 ) SCREAMING_SNAKE_CASE : Optional[int] = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() 
SCREAMING_SNAKE_CASE : Optional[Any] = model(a__ , labels=a__ ) SCREAMING_SNAKE_CASE : Union[str, Any] = True if secondary_learner is not None: SCREAMING_SNAKE_CASE : List[str] = secondary_learner.forward( torch.tensor(a__ , dtype=torch.long , device=a__ ).unsqueeze(0 ) )[0].item() observed_qs.append(float(a__ ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: SCREAMING_SNAKE_CASE : Dict = -1 if predicted_q < threshold: SCREAMING_SNAKE_CASE : str = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) SCREAMING_SNAKE_CASE : List[str] = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() SCREAMING_SNAKE_CASE : Any = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: SCREAMING_SNAKE_CASE : str = compute_perplexity(a__ , a__ , a__ ) test_perps.append(a__ ) print('''Test perplexity, step''' , a__ , ''':''' , a__ ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , a__ ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def UpperCAmelCase_( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' ) # Required parameters parser.add_argument( '''--data_dir''' , default=a__ , type=a__ , required=a__ , help='''The input data dir. Should contain data files for WikiText.''' , ) parser.add_argument( '''--model_name_or_path''' , default=a__ , type=a__ , required=a__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--data_file''' , type=a__ , default=a__ , help=( '''A jbl file containing tokenized data which can be split as objective dataset, ''' '''train_dataset and test_dataset.''' ) , ) parser.add_argument( '''--igf_data_file''' , type=a__ , default=a__ , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , ) parser.add_argument( '''--output_dir''' , default=a__ , type=a__ , required=a__ , help='''The output directory where the final fine-tuned model is stored.''' , ) parser.add_argument( '''--tokenizer_name''' , default=a__ , type=a__ , help='''Pretrained tokenizer name or path if not the same as model_name''' , ) parser.add_argument('''--seed''' , type=a__ , default=a__ , help='''A seed for reproducible training.''' ) parser.add_argument( '''--context_len''' , default=32 , type=a__ , help=( '''The maximum total input sequence length after tokenization. 
Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--size_objective_set''' , default=100 , type=a__ , help='''number of articles that are long enough to be used as our objective set''' , ) parser.add_argument( '''--eval_freq''' , default=100 , type=a__ , help='''secondary model evaluation is triggered at eval_freq''' ) parser.add_argument('''--max_steps''' , default=1_000 , type=a__ , help='''To calculate training epochs''' ) parser.add_argument( '''--secondary_learner_batch_size''' , default=128 , type=a__ , help='''batch size of training data for secondary learner''' , ) parser.add_argument( '''--batch_size''' , default=16 , type=a__ , help='''batch size of training data of language model(gpt2) ''' ) parser.add_argument( '''--eval_interval''' , default=10 , type=a__ , help=( '''decay the selectivity of our secondary learner filter from''' '''1 standard deviation above average to 1 below average after 10 batches''' ) , ) parser.add_argument( '''--number''' , default=100 , type=a__ , help='''The number of examples split to be used as objective_set/test_data''' ) parser.add_argument( '''--min_len''' , default=1_026 , type=a__ , help='''The minimum length of the article to be used as objective set''' ) parser.add_argument( '''--secondary_learner_max_epochs''' , default=15 , type=a__ , help='''number of epochs to train secondary learner''' ) parser.add_argument('''--trim''' , default=a__ , type=a__ , help='''truncate the example if it exceeds context length''' ) parser.add_argument( '''--threshold''' , default=1.0 , type=a__ , help=( '''The threshold value used by secondary learner to filter the train_data and allow only''' ''' informative data as input to the model''' ) , ) parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=a__ , help='''finetuned_model_name''' ) parser.add_argument( '''--recopy_model''' , default=a__ , type=a__ , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=a__ , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , ) # Load train data for secondary learner SCREAMING_SNAKE_CASE : List[Any] = joblib.load('''data/IGF_values.jbl''' ) # Train secondary learner SCREAMING_SNAKE_CASE : Tuple = training_secondary_learner( a__ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='''igf_model.pt''' , ) # load pretrained gpt2 model SCREAMING_SNAKE_CASE : Optional[Any] = GPTaLMHeadModel.from_pretrained('''gpt2''' ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = generate_datasets( context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=100 , min_len=1_026 , trim=a__ ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( a__ , a__ , a__ , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=a__ , secondary_learner=a__ , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , ) if __name__ == "__main__": main()
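The fine-tuning loop filters contexts with a deliberately simple non-constant threshold: after 10 batches it drops from 1.0 to -1, so every later context passes. That gate, pulled out as a hypothetical helper:

def keep_context(predicted_q, global_step, start_threshold=1.0):
    # Mirrors the loop above: a hard threshold drop at step 10 rather than
    # the gradual one-standard-deviation schedule the help text describes.
    threshold = start_threshold if global_step < 10 else -1.0
    return predicted_q >= threshold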
313
0
"""simple docstring""" from string import ascii_uppercase _lowercase = {char: i for i, char in enumerate(ascii_uppercase)} _lowercase = dict(enumerate(ascii_uppercase)) def _snake_case ( snake_case__ : str , snake_case__ : str ): A = len(snake_case__ ) A = 0 while True: if x == i: A = 0 if len(snake_case__ ) == len(snake_case__ ): break key += key[i] i += 1 return key def _snake_case ( snake_case__ : str , snake_case__ : str ): A = '' A = 0 for letter in message: if letter == " ": cipher_text += " " else: A = (dicta[letter] - dicta[key_new[i]]) % 26 i += 1 cipher_text += dicta[x] return cipher_text def _snake_case ( snake_case__ : str , snake_case__ : str ): A = '' A = 0 for letter in cipher_text: if letter == " ": or_txt += " " else: A = (dicta[letter] + dicta[key_new[i]] + 26) % 26 i += 1 or_txt += dicta[x] return or_txt def _snake_case ( ): A = 'THE GERMAN ATTACK' A = 'SECRET' A = generate_key(snake_case__ , snake_case__ ) A = cipher_text(snake_case__ , snake_case__ ) print(F'Encrypted Text = {s}' ) print(F'Original Text = {original_text(snake_case__ , snake_case__ )}' ) if __name__ == "__main__": import doctest doctest.testmod() main()
74
import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def UpperCAmelCase_( a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = filter(lambda a__ : p.requires_grad , model.parameters() ) SCREAMING_SNAKE_CASE : List[Any] = sum([np.prod(p.size() ) for p in model_parameters] ) return params a__ : Any = logging.getLogger(__name__) def UpperCAmelCase_( a__ , a__ ): """simple docstring""" if metric == "rouge2": SCREAMING_SNAKE_CASE : str = '''{val_avg_rouge2:.4f}-{step_count}''' elif metric == "bleu": SCREAMING_SNAKE_CASE : List[Any] = '''{val_avg_bleu:.4f}-{step_count}''' elif metric == "em": SCREAMING_SNAKE_CASE : int = '''{val_avg_em:.4f}-{step_count}''' elif metric == "loss": SCREAMING_SNAKE_CASE : int = '''{val_avg_loss:.4f}-{step_count}''' else: raise NotImplementedError( F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this""" ''' function.''' ) SCREAMING_SNAKE_CASE : Dict = ModelCheckpoint( dirpath=a__ , filename=a__ , monitor=F"""val_{metric}""" , mode='''max''' , save_top_k=1 , every_n_epochs=1 , ) return checkpoint_callback def UpperCAmelCase_( a__ , a__ ): """simple docstring""" return EarlyStopping( monitor=F"""val_{metric}""" , mode='''min''' if '''loss''' in metric else '''max''' , patience=a__ , verbose=a__ , ) class a_ ( pl.Callback ): """simple docstring""" def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict: SCREAMING_SNAKE_CASE : List[str] = {F"""lr_group_{i}""": param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(_lowerCamelCase ) @rank_zero_only def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True ) ->None: logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" ) SCREAMING_SNAKE_CASE : Optional[int] = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} ) # Log results SCREAMING_SNAKE_CASE : List[str] = Path(pl_module.hparams.output_dir ) if type_path == "test": SCREAMING_SNAKE_CASE : Any = od / '''test_results.txt''' SCREAMING_SNAKE_CASE : Optional[int] = od / '''test_generations.txt''' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
SCREAMING_SNAKE_CASE : str = od / F"""{type_path}_results/{trainer.global_step:05d}.txt""" SCREAMING_SNAKE_CASE : Tuple = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt""" results_file.parent.mkdir(exist_ok=_lowerCamelCase ) generations_file.parent.mkdir(exist_ok=_lowerCamelCase ) with open(_lowerCamelCase , '''a+''' ) as writer: for key in sorted(_lowerCamelCase ): if key in ["log", "progress_bar", "preds"]: continue SCREAMING_SNAKE_CASE : Tuple = metrics[key] if isinstance(_lowerCamelCase , torch.Tensor ): SCREAMING_SNAKE_CASE : List[Any] = val.item() SCREAMING_SNAKE_CASE : Tuple = F"""{key}: {val:.6f}\n""" writer.write(_lowerCamelCase ) if not save_generations: return if "preds" in metrics: SCREAMING_SNAKE_CASE : Optional[Any] = '''\n'''.join(metrics['''preds'''] ) generations_file.open('''w+''' ).write(_lowerCamelCase ) @rank_zero_only def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict: try: SCREAMING_SNAKE_CASE : Any = pl_module.model.model.num_parameters() except AttributeError: SCREAMING_SNAKE_CASE : Optional[int] = pl_module.model.num_parameters() SCREAMING_SNAKE_CASE : int = count_trainable_parameters(_lowerCamelCase ) # mp stands for million parameters trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} ) @rank_zero_only def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->List[Any]: save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(_lowerCamelCase , _lowerCamelCase , '''test''' ) @rank_zero_only def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Tuple: save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
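A minimal sketch of wiring these callbacks into a Lightning trainer. The names get_checkpoint_callback, get_early_stopping_callback and Seq2SeqLoggingCallback are assumptions taken from the upstream RAG example; they are obscured in the copy above.

import pytorch_lightning as pl

trainer = pl.Trainer(
    max_epochs=3,
    callbacks=[
        Seq2SeqLoggingCallback(),
        get_checkpoint_callback("output/", "rouge2"),             # monitors val_rouge2, keeps top-1
        get_early_stopping_callback(metric="rouge2", patience=3),
    ],
)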
313
0
"""Prim's algorithm for finding a minimum spanning tree (MST)."""
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    """Graph vertex with the key/parent pair used by Prim's algorithm."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex id: distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    """Connect 1-based vertices `a` and `b` with an edge of the given weight."""
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """List-based Prim: repeatedly extract the minimum-key vertex."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Heap-based Prim: re-heapify after each key decrease."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """Doctest stub."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
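A small usage sketch for the graph API above, with hand-picked weights; the MST of this triangle drops the weight-5 edge:

graph = [Vertex(i) for i in range(3)]    # vertices with ids "0", "1", "2"
connect(graph, 1, 2, 1)                  # 1-based endpoints, weight 1
connect(graph, 2, 3, 4)
connect(graph, 1, 3, 5)
print(prim(graph, graph[0]))             # [(2, 1), (3, 2)]
print(list(prim_heap(graph, graph[0])))  # same MST via the heap variant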
75
import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def UpperCAmelCase_( a__ ): """simple docstring""" if ( (cp >= 0x4_E00 and cp <= 0x9_FFF) or (cp >= 0x3_400 and cp <= 0x4_DBF) # or (cp >= 0x20_000 and cp <= 0x2A_6DF) # or (cp >= 0x2A_700 and cp <= 0x2B_73F) # or (cp >= 0x2B_740 and cp <= 0x2B_81F) # or (cp >= 0x2B_820 and cp <= 0x2C_EAF) # or (cp >= 0xF_900 and cp <= 0xF_AFF) or (cp >= 0x2F_800 and cp <= 0x2F_A1F) # ): # return True return False def UpperCAmelCase_( a__ ): """simple docstring""" for char in word: SCREAMING_SNAKE_CASE : str = ord(a__ ) if not _is_chinese_char(a__ ): return 0 return 1 def UpperCAmelCase_( a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = set() for token in tokens: SCREAMING_SNAKE_CASE : str = len(a__ ) > 1 and is_chinese(a__ ) if chinese_word: word_set.add(a__ ) SCREAMING_SNAKE_CASE : str = list(a__ ) return word_list def UpperCAmelCase_( a__ , a__ ): """simple docstring""" if not chinese_word_set: return bert_tokens SCREAMING_SNAKE_CASE : List[str] = max([len(a__ ) for w in chinese_word_set] ) SCREAMING_SNAKE_CASE : Tuple = bert_tokens SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = 0, len(a__ ) while start < end: SCREAMING_SNAKE_CASE : Dict = True if is_chinese(bert_word[start] ): SCREAMING_SNAKE_CASE : Optional[int] = min(end - start , a__ ) for i in range(a__ , 1 , -1 ): SCREAMING_SNAKE_CASE : Optional[int] = ''''''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): SCREAMING_SNAKE_CASE : Optional[int] = '''##''' + bert_word[j] SCREAMING_SNAKE_CASE : List[str] = start + i SCREAMING_SNAKE_CASE : Optional[Any] = False break if single_word: start += 1 return bert_word def UpperCAmelCase_( a__ , a__ , a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = [] for i in range(0 , len(a__ ) , 100 ): SCREAMING_SNAKE_CASE : Optional[Any] = ltp_tokenizer.seg(lines[i : i + 100] )[0] SCREAMING_SNAKE_CASE : Union[str, Any] = [get_chinese_word(a__ ) for r in res] ltp_res.extend(a__ ) assert len(a__ ) == len(a__ ) SCREAMING_SNAKE_CASE : Any = [] for i in range(0 , len(a__ ) , 100 ): SCREAMING_SNAKE_CASE : int = bert_tokenizer(lines[i : i + 100] , add_special_tokens=a__ , truncation=a__ , max_length=512 ) bert_res.extend(res['''input_ids'''] ) assert len(a__ ) == len(a__ ) SCREAMING_SNAKE_CASE : int = [] for input_ids, chinese_word in zip(a__ , a__ ): SCREAMING_SNAKE_CASE : List[Any] = [] for id in input_ids: SCREAMING_SNAKE_CASE : List[Any] = bert_tokenizer._convert_id_to_token(a__ ) input_tokens.append(a__ ) SCREAMING_SNAKE_CASE : List[str] = add_sub_symbol(a__ , a__ ) SCREAMING_SNAKE_CASE : Dict = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(a__ ): if token[:2] == "##": SCREAMING_SNAKE_CASE : Optional[int] = token[2:] # save chinese tokens' pos if len(a__ ) == 1 and _is_chinese_char(ord(a__ ) ): ref_id.append(a__ ) ref_ids.append(a__ ) assert len(a__ ) == len(a__ ) return ref_ids def UpperCAmelCase_( a__ ): """simple docstring""" with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f: SCREAMING_SNAKE_CASE : List[str] = f.readlines() SCREAMING_SNAKE_CASE : Union[str, Any] = [line.strip() for line in data if len(a__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' SCREAMING_SNAKE_CASE : List[str] = LTP(args.ltp ) # faster in GPU device SCREAMING_SNAKE_CASE : int = BertTokenizer.from_pretrained(args.bert ) SCREAMING_SNAKE_CASE : int = prepare_ref(a__ , a__ , a__ ) with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f: SCREAMING_SNAKE_CASE : Tuple = [json.dumps(a__ ) + '''\n''' for ref in ref_ids] f.writelines(a__ ) if __name__ == "__main__": a__ : int = argparse.ArgumentParser(description='''prepare_chinese_ref''') parser.add_argument( '''--file_name''', type=str, default='''./resources/chinese-demo.txt''', help='''file need process, same as training data in lm''', ) parser.add_argument( '''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path''' ) parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''') parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''') a__ : int = parser.parse_args() main(args)
313
0
def greatest_common_divisor(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Iterative Euclidean algorithm."""
    while y:  # --> when y=0 the loop terminates and x is the final GCD
        x, y = y, x % y
    return abs(x)


def main():
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
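Both implementations agree on a couple of quick checks:

assert greatest_common_divisor(24, 40) == 8
assert gcd_by_iterative(24, 40) == 8
assert gcd_by_iterative(-3, 9) == 3    # abs() keeps the result non-negative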
76
import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def UpperCAmelCase_( a__ , a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = F"""{sampling_rate}""" SCREAMING_SNAKE_CASE : Tuple = '''1''' SCREAMING_SNAKE_CASE : Union[str, Any] = '''f32le''' SCREAMING_SNAKE_CASE : List[Any] = [ '''ffmpeg''', '''-i''', '''pipe:0''', '''-ac''', ac, '''-ar''', ar, '''-f''', format_for_conversion, '''-hide_banner''', '''-loglevel''', '''quiet''', '''pipe:1''', ] try: with subprocess.Popen(a__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: SCREAMING_SNAKE_CASE : Tuple = ffmpeg_process.communicate(a__ ) except FileNotFoundError as error: raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error SCREAMING_SNAKE_CASE : Optional[Any] = output_stream[0] SCREAMING_SNAKE_CASE : Any = np.frombuffer(a__ , np.floataa ) if audio.shape[0] == 0: raise ValueError('''Malformed soundfile''' ) return audio def UpperCAmelCase_( a__ , a__ , a__ = "f32le" , ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = F"""{sampling_rate}""" SCREAMING_SNAKE_CASE : Dict = '''1''' if format_for_conversion == "s16le": SCREAMING_SNAKE_CASE : List[Any] = 2 elif format_for_conversion == "f32le": SCREAMING_SNAKE_CASE : Dict = 4 else: raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = platform.system() if system == "Linux": SCREAMING_SNAKE_CASE : Dict = '''alsa''' SCREAMING_SNAKE_CASE : Any = '''default''' elif system == "Darwin": SCREAMING_SNAKE_CASE : Union[str, Any] = '''avfoundation''' SCREAMING_SNAKE_CASE : Optional[int] = ''':0''' elif system == "Windows": SCREAMING_SNAKE_CASE : int = '''dshow''' SCREAMING_SNAKE_CASE : Any = '''default''' SCREAMING_SNAKE_CASE : Union[str, Any] = [ '''ffmpeg''', '''-f''', format_, '''-i''', input_, '''-ac''', ac, '''-ar''', ar, '''-f''', format_for_conversion, '''-fflags''', '''nobuffer''', '''-hide_banner''', '''-loglevel''', '''quiet''', '''pipe:1''', ] SCREAMING_SNAKE_CASE : List[str] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample SCREAMING_SNAKE_CASE : List[Any] = _ffmpeg_stream(a__ , a__ ) for item in iterator: yield item def UpperCAmelCase_( a__ , a__ , a__ = None , a__ = None , a__ = "f32le" , ): """simple docstring""" if stream_chunk_s is not None: SCREAMING_SNAKE_CASE : Tuple = stream_chunk_s else: SCREAMING_SNAKE_CASE : List[str] = chunk_length_s SCREAMING_SNAKE_CASE : Union[str, Any] = ffmpeg_microphone(a__ , a__ , format_for_conversion=a__ ) if format_for_conversion == "s16le": SCREAMING_SNAKE_CASE : Optional[int] = np.intaa SCREAMING_SNAKE_CASE : List[Any] = 2 elif format_for_conversion == "f32le": SCREAMING_SNAKE_CASE : Any = np.floataa SCREAMING_SNAKE_CASE : Union[str, Any] = 4 else: raise ValueError(F"""Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`""" ) if stride_length_s is None: SCREAMING_SNAKE_CASE : Optional[Any] = chunk_length_s / 6 SCREAMING_SNAKE_CASE : Dict = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(a__ , (int, float) ): SCREAMING_SNAKE_CASE : List[Any] = [stride_length_s, stride_length_s] SCREAMING_SNAKE_CASE : Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample SCREAMING_SNAKE_CASE : int = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample SCREAMING_SNAKE_CASE : Union[str, Any] = datetime.datetime.now() SCREAMING_SNAKE_CASE : Dict = datetime.timedelta(seconds=a__ ) for item in chunk_bytes_iter(a__ , a__ , stride=(stride_left, stride_right) , stream=a__ ): # Put everything back in numpy scale SCREAMING_SNAKE_CASE : Dict = np.frombuffer(item['''raw'''] , dtype=a__ ) SCREAMING_SNAKE_CASE : Optional[Any] = ( item['''stride'''][0] // size_of_sample, item['''stride'''][1] // size_of_sample, ) SCREAMING_SNAKE_CASE : Any = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! SKIP continue yield item def UpperCAmelCase_( a__ , a__ , a__ , a__ = False ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = b'''''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = stride if stride_left + stride_right >= chunk_len: raise ValueError( F"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = 0 for raw in iterator: acc += raw if stream and len(a__ ) < chunk_len: SCREAMING_SNAKE_CASE : List[str] = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(a__ ) >= chunk_len: # We are flushing the accumulator SCREAMING_SNAKE_CASE : str = (_stride_left, stride_right) SCREAMING_SNAKE_CASE : List[str] = {'''raw''': acc[:chunk_len], '''stride''': stride} if stream: SCREAMING_SNAKE_CASE : List[str] = False yield item SCREAMING_SNAKE_CASE : Dict = stride_left SCREAMING_SNAKE_CASE : int = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(a__ ) > stride_left: SCREAMING_SNAKE_CASE : Optional[Any] = {'''raw''': acc, '''stride''': (_stride_left, 0)} if stream: SCREAMING_SNAKE_CASE : Union[str, Any] = False yield item def UpperCAmelCase_( a__ , a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = 2**24 # 16Mo try: with subprocess.Popen(a__ , stdout=subprocess.PIPE , bufsize=a__ ) as ffmpeg_process: while True: SCREAMING_SNAKE_CASE : str = ffmpeg_process.stdout.read(a__ ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
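A hedged sketch of consuming the microphone stream defined above. The public name ffmpeg_microphone_live is assumed from transformers.pipelines.audio_utils, and a working ffmpeg install plus a microphone are required:

from transformers.pipelines.audio_utils import ffmpeg_microphone_live

mic = ffmpeg_microphone_live(sampling_rate=16000, chunk_length_s=2.0, stream_chunk_s=0.25)
for item in mic:
    print(item["raw"].shape, item["stride"], item.get("partial", False))
    break  # one chunk is enough for this demo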
313
0
"""simple docstring""" from manim import * class UpperCAmelCase_ ( _a): def _UpperCAmelCase ( self ) -> int: lowercase__ : str = Rectangle(height=0.5 , width=0.5 ) lowercase__ : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) lowercase__ : List[str] = [mem.copy() for i in range(6 )] lowercase__ : int = [mem.copy() for i in range(6 )] lowercase__ : int = VGroup(*a ).arrange(a , buff=0 ) lowercase__ : Optional[int] = VGroup(*a ).arrange(a , buff=0 ) lowercase__ : List[str] = VGroup(a , a ).arrange(a , buff=0 ) lowercase__ : Union[str, Any] = Text('CPU' , font_size=2_4 ) lowercase__ : Union[str, Any] = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a ) cpu.move_to([-2.5, -0.5, 0] ) self.add(a ) lowercase__ : Union[str, Any] = [mem.copy() for i in range(4 )] lowercase__ : List[Any] = VGroup(*a ).arrange(a , buff=0 ) lowercase__ : Tuple = Text('GPU' , font_size=2_4 ) lowercase__ : Optional[int] = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a ) gpu.move_to([-1, -1, 0] ) self.add(a ) lowercase__ : int = [mem.copy() for i in range(6 )] lowercase__ : List[str] = VGroup(*a ).arrange(a , buff=0 ) lowercase__ : int = Text('Model' , font_size=2_4 ) lowercase__ : Optional[Any] = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a ) model.move_to([3, -1.0, 0] ) self.add(a ) lowercase__ : Dict = [] for i, rect in enumerate(a ): rect.set_stroke(a ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) lowercase__ : Dict = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(a , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=a ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0] , direction=a , buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1] , direction=a , buff=0.0 ) self.add(a ) cpu_targs.append(a ) lowercase__ : Any = [mem.copy() for i in range(6 )] lowercase__ : Optional[Any] = VGroup(*a ).arrange(a , buff=0 ) lowercase__ : Optional[int] = Text('Loaded Checkpoint' , font_size=2_4 ) lowercase__ : Dict = Group(a , a ).arrange(a , aligned_edge=a , buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) lowercase__ : str = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) lowercase__ : Any = MarkupText( f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , ) key_text.move_to([-5, 2.4, 0] ) self.add(a , a ) lowercase__ : List[Any] = MarkupText( f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , ) blue_text.next_to(a , DOWN * 2.4 , aligned_edge=key_text.get_left() ) lowercase__ : List[str] = MarkupText( f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=2_4 , ) step_a.move_to([2, 2, 0] ) self.play(Write(a ) , Write(a ) ) self.play(Write(a , run_time=1 ) , Create(a , run_time=1 ) ) lowercase__ : str = [] lowercase__ : Any = [] for i, rect in enumerate(a ): lowercase__ : Optional[int] = fill.copy().set_fill(a , opacity=0.7 ) target.move_to(a ) first_animations.append(GrowFromCenter(a , run_time=1 ) ) lowercase__ : Optional[Any] = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) second_animations.append(MoveToTarget(a , run_time=1.5 ) ) self.play(*a ) self.play(*a ) self.wait()
77
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # modeling classes are only advertised when torch is installed
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )

else:
    import sys

    # replace this module with a lazy proxy that imports submodules on first access
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
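With the lazy structure above, users import from the top-level package and the submodule is only materialized on first access; a minimal sketch, assuming torch is installed:

from transformers import WavLMConfig, WavLMModel

config = WavLMConfig()        # default hyperparameters
model = WavLMModel(config)    # randomly initialized weights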
313
0
"""simple docstring""" import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder snake_case_ = """base_with_context""" def _lowerCAmelCase ( lowercase_ , lowercase_ ): UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) ) UpperCAmelCase = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=lowercase_ ) for lyr_num, lyr in enumerate(model.encoders ): UpperCAmelCase = weights[F"""layers_{lyr_num}"""] UpperCAmelCase = nn.Parameter( torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) ) UpperCAmelCase = ly_weight['attention'] UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) ) return model def _lowerCAmelCase ( lowercase_ , lowercase_ ): UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) ) UpperCAmelCase = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=lowercase_ ) for lyr_num, lyr in enumerate(model.encoders ): UpperCAmelCase = weights[F"""layers_{lyr_num}"""] UpperCAmelCase = ly_weight['attention'] UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) UpperCAmelCase = nn.Parameter( torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) ) return model def _lowerCAmelCase ( lowercase_ , lowercase_ ): UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) ) UpperCAmelCase = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=lowercase_ ) UpperCAmelCase = nn.Parameter( torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) ) for lyr_num, lyr in enumerate(model.decoders ): 
UpperCAmelCase = weights[F"""layers_{lyr_num}"""] UpperCAmelCase = nn.Parameter( torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) ) UpperCAmelCase = nn.Parameter( torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) ) UpperCAmelCase = ly_weight['self_attention'] UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) UpperCAmelCase = ly_weight['MultiHeadDotProductAttention_0'] UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) UpperCAmelCase = nn.Parameter( torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) UpperCAmelCase = nn.Parameter( torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) ) UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) ) return model def _lowerCAmelCase ( lowercase_ ): UpperCAmelCase = checkpoints.load_tax_checkpoint(args.checkpoint_path ) UpperCAmelCase = jnp.tree_util.tree_map(onp.array , lowercase_ ) UpperCAmelCase = [ 'from __gin__ import dynamic_registration', 'from music_spectrogram_diffusion.models.diffusion import diffusion_utils', 'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0', 'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()', ] UpperCAmelCase = os.path.join(args.checkpoint_path , '..' 
, 'config.gin' ) UpperCAmelCase = inference.parse_training_gin_file(lowercase_ , lowercase_ ) UpperCAmelCase = inference.InferenceModel(args.checkpoint_path , lowercase_ ) UpperCAmelCase = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' ) UpperCAmelCase = SpectrogramNotesEncoder( max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , ) UpperCAmelCase = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , ) UpperCAmelCase = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) UpperCAmelCase = load_notes_encoder(ta_checkpoint['target']['token_encoder'] , lowercase_ ) UpperCAmelCase = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , lowercase_ ) UpperCAmelCase = load_decoder(ta_checkpoint['target']['decoder'] , lowercase_ ) UpperCAmelCase = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' ) UpperCAmelCase = SpectrogramDiffusionPipeline( notes_encoder=lowercase_ , continuous_encoder=lowercase_ , decoder=lowercase_ , scheduler=lowercase_ , melgan=lowercase_ , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": snake_case_ = argparse.ArgumentParser() parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""") parser.add_argument( """--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not.""" ) parser.add_argument( """--checkpoint_path""", default=f'''{MODEL}/checkpoint_500000''', type=str, required=False, help="""Path to the original jax model checkpoint.""", ) snake_case_ = parser.parse_args() main(args)
78
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING a__ : int = logging.get_logger(__name__) a__ : Optional[Any] = { '''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''', # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class a_ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = 'deformable_detr' __SCREAMING_SNAKE_CASE : Union[str, Any] = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=3 , _lowerCamelCase=300 , _lowerCamelCase=1024 , _lowerCamelCase=6 , _lowerCamelCase=1024 , _lowerCamelCase=8 , _lowerCamelCase=6 , _lowerCamelCase=1024 , _lowerCamelCase=8 , _lowerCamelCase=0.0 , _lowerCamelCase=True , _lowerCamelCase="relu" , _lowerCamelCase=256 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1.0 , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase="sine" , _lowerCamelCase="resnet50" , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=False , _lowerCamelCase=300 , _lowerCamelCase=False , _lowerCamelCase=1 , _lowerCamelCase=5 , _lowerCamelCase=2 , _lowerCamelCase=1 , _lowerCamelCase=1 , _lowerCamelCase=5 , _lowerCamelCase=2 , _lowerCamelCase=0.1 , _lowerCamelCase=0.2_5 , _lowerCamelCase=False , **_lowerCamelCase , ) ->Optional[Any]: if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.''' ) SCREAMING_SNAKE_CASE : Dict = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): SCREAMING_SNAKE_CASE : List[Any] = backbone_config.get('''model_type''' ) SCREAMING_SNAKE_CASE : Optional[Any] = CONFIG_MAPPING[backbone_model_type] SCREAMING_SNAKE_CASE : int = config_class.from_dict(_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = use_timm_backbone SCREAMING_SNAKE_CASE : Optional[int] = backbone_config SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels SCREAMING_SNAKE_CASE : Optional[Any] = num_queries SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings SCREAMING_SNAKE_CASE : Optional[int] = d_model SCREAMING_SNAKE_CASE : str = encoder_ffn_dim SCREAMING_SNAKE_CASE : str = encoder_layers SCREAMING_SNAKE_CASE : str = encoder_attention_heads SCREAMING_SNAKE_CASE : Optional[int] = decoder_ffn_dim SCREAMING_SNAKE_CASE : int = decoder_layers SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_attention_heads SCREAMING_SNAKE_CASE : List[str] = dropout SCREAMING_SNAKE_CASE : Optional[int] = attention_dropout SCREAMING_SNAKE_CASE : str = activation_dropout SCREAMING_SNAKE_CASE : Optional[int] = activation_function SCREAMING_SNAKE_CASE : Optional[int] = init_std SCREAMING_SNAKE_CASE : List[str] = init_xavier_std SCREAMING_SNAKE_CASE : Optional[Any] = encoder_layerdrop SCREAMING_SNAKE_CASE : Union[str, Any] = auxiliary_loss SCREAMING_SNAKE_CASE : List[Any] = position_embedding_type SCREAMING_SNAKE_CASE : str = backbone SCREAMING_SNAKE_CASE : Dict = use_pretrained_backbone SCREAMING_SNAKE_CASE : Dict = dilation # deformable attributes SCREAMING_SNAKE_CASE : str = num_feature_levels SCREAMING_SNAKE_CASE : Optional[Any] = encoder_n_points SCREAMING_SNAKE_CASE : Any = decoder_n_points SCREAMING_SNAKE_CASE : str = two_stage SCREAMING_SNAKE_CASE : List[str] = two_stage_num_proposals SCREAMING_SNAKE_CASE : Dict = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher SCREAMING_SNAKE_CASE : int = class_cost SCREAMING_SNAKE_CASE : Union[str, Any] = bbox_cost SCREAMING_SNAKE_CASE : Optional[int] = giou_cost # Loss coefficients SCREAMING_SNAKE_CASE : Dict = mask_loss_coefficient SCREAMING_SNAKE_CASE : Union[str, Any] = dice_loss_coefficient SCREAMING_SNAKE_CASE : str = bbox_loss_coefficient SCREAMING_SNAKE_CASE : Tuple = giou_loss_coefficient SCREAMING_SNAKE_CASE : Optional[int] = eos_coefficient SCREAMING_SNAKE_CASE : Tuple = focal_alpha SCREAMING_SNAKE_CASE : Optional[int] = disable_custom_kernels super().__init__(is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase ) @property def __lowerCAmelCase ( self ) ->int: return self.encoder_attention_heads @property def __lowerCAmelCase ( self ) ->int: return self.d_model def __lowerCAmelCase ( self ) ->Any: SCREAMING_SNAKE_CASE : str = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: SCREAMING_SNAKE_CASE : Optional[int] = self.backbone_config.to_dict() SCREAMING_SNAKE_CASE : Any = self.__class__.model_type return output
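Assuming this file is the stock transformers DeformableDetrConfig, a short sketch of the validated two-stage setup (two_stage=True requires with_box_refine=True, per the check above):

from transformers import DeformableDetrConfig

config = DeformableDetrConfig(two_stage=True, with_box_refine=True, num_queries=300)
print(config.d_model, config.num_feature_levels)   # 256 4 with the defaults above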
313
0
""" BioGPT model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    """Configuration class for BioGPT models; defaults match microsoft/biogpt."""

    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
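A minimal hedged sketch instantiating this config through its public transformers name (torch assumed installed):

from transformers import BioGptConfig, BioGptModel

config = BioGptConfig()       # microsoft/biogpt defaults: 42384-token vocab, 24 layers
model = BioGptModel(config)   # randomly initialized
print(config.hidden_size)     # 1024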
79
import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class a_ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = (EulerDiscreteScheduler,) __SCREAMING_SNAKE_CASE : Optional[int] = 10 def __lowerCAmelCase ( self , **_lowerCamelCase ) ->Tuple: SCREAMING_SNAKE_CASE : Optional[int] = { '''num_train_timesteps''': 1100, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', } config.update(**_lowerCamelCase ) return config def __lowerCAmelCase ( self ) ->Tuple: for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_lowerCamelCase ) def __lowerCAmelCase ( self ) ->Any: for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ): self.check_over_configs(beta_start=_lowerCamelCase , beta_end=_lowerCamelCase ) def __lowerCAmelCase ( self ) ->int: for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_lowerCamelCase ) def __lowerCAmelCase ( self ) ->Optional[Any]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_lowerCamelCase ) def __lowerCAmelCase ( self ) ->List[Any]: SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : int = self.get_scheduler_config() SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**_lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = self.dummy_model() SCREAMING_SNAKE_CASE : int = self.dummy_sample_deter * scheduler.init_noise_sigma SCREAMING_SNAKE_CASE : Any = sample.to(_lowerCamelCase ) for i, t in enumerate(scheduler.timesteps ): SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : List[str] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = output.prev_sample SCREAMING_SNAKE_CASE : List[Any] = torch.sum(torch.abs(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : List[Any] = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2 assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3 def __lowerCAmelCase ( self ) ->List[str]: SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config(prediction_type='''v_prediction''' ) SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**_lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : int = self.dummy_model() SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma SCREAMING_SNAKE_CASE : List[str] = sample.to(_lowerCamelCase ) for i, t in enumerate(scheduler.timesteps ): SCREAMING_SNAKE_CASE : str = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = output.prev_sample SCREAMING_SNAKE_CASE : str = torch.sum(torch.abs(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : int = torch.mean(torch.abs(_lowerCamelCase ) ) assert 
abs(result_sum.item() - 0.0_0_0_2 ) < 1e-2 assert abs(result_mean.item() - 2.2676e-06 ) < 1e-3 def __lowerCAmelCase ( self ) ->Tuple: SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config() SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**_lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps , device=_lowerCamelCase ) SCREAMING_SNAKE_CASE : int = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_model() SCREAMING_SNAKE_CASE : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(_lowerCamelCase ) for t in scheduler.timesteps: SCREAMING_SNAKE_CASE : Dict = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = output.prev_sample SCREAMING_SNAKE_CASE : List[Any] = torch.sum(torch.abs(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : str = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2 assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3 def __lowerCAmelCase ( self ) ->Optional[int]: SCREAMING_SNAKE_CASE : Dict = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**_lowerCamelCase , use_karras_sigmas=_lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps , device=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_model() SCREAMING_SNAKE_CASE : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() SCREAMING_SNAKE_CASE : int = sample.to(_lowerCamelCase ) for t in scheduler.timesteps: SCREAMING_SNAKE_CASE : List[Any] = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Dict = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ) SCREAMING_SNAKE_CASE : int = output.prev_sample SCREAMING_SNAKE_CASE : Optional[Any] = torch.sum(torch.abs(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : Any = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9 ) < 1e-2 assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3 ) < 1e-3
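Outside the test harness, the scheduler under test is driven the same way; a short sketch with the config values used above:

from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)
print(scheduler.init_noise_sigma)   # sigma used to scale the initial noise
print(scheduler.timesteps)          # ten timesteps, descending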
313
0
"""Count token occurrences over a binarized dataset to smooth MLM masking probabilities (cf. XLM/word2vec)."""
import argparse
import logging
import pickle
from collections import Counter

logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
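The counting logic in isolation, without the pickle I/O:

from collections import Counter

data = [[5, 7, 7], [7, 9]]       # two token-id sequences
counter = Counter()
for tk_ids in data:
    counter.update(tk_ids)
counts = [0] * 10                # vocab_size = 10 for this toy example
for k, v in counter.items():
    counts[k] = v
print(counts)                    # [0, 0, 0, 0, 0, 1, 0, 3, 0, 1]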
80
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer a__ : Dict = logging.get_logger(__name__) a__ : Dict = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} a__ : str = { '''vocab_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''', }, '''merges_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''', }, } a__ : Optional[int] = { '''allenai/led-base-16384''': 16_384, } class a_ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE : Union[str, Any] = LEDTokenizer __SCREAMING_SNAKE_CASE : Optional[int] = ['input_ids', 'attention_mask'] def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="replace" , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase=False , _lowerCamelCase=True , **_lowerCamelCase , ) ->Union[str, Any]: super().__init__( _lowerCamelCase , _lowerCamelCase , tokenizer_file=_lowerCamelCase , errors=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase , **_lowerCamelCase , ) SCREAMING_SNAKE_CASE : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , _lowerCamelCase ) != add_prefix_space: SCREAMING_SNAKE_CASE : str = getattr(_lowerCamelCase , pre_tok_state.pop('''type''' ) ) SCREAMING_SNAKE_CASE : Optional[int] = add_prefix_space SCREAMING_SNAKE_CASE : str = pre_tok_class(**_lowerCamelCase ) SCREAMING_SNAKE_CASE : Any = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` SCREAMING_SNAKE_CASE : List[Any] = '''post_processor''' SCREAMING_SNAKE_CASE : int = getattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase ) if tokenizer_component_instance: SCREAMING_SNAKE_CASE : Any = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: SCREAMING_SNAKE_CASE : Optional[int] = tuple(state['''sep'''] ) if "cls" in state: SCREAMING_SNAKE_CASE : Optional[Any] = tuple(state['''cls'''] ) SCREAMING_SNAKE_CASE : Any = False if state.get('''add_prefix_space''' , _lowerCamelCase ) != add_prefix_space: SCREAMING_SNAKE_CASE : Union[str, Any] = add_prefix_space SCREAMING_SNAKE_CASE : Union[str, Any] = True if state.get('''trim_offsets''' , _lowerCamelCase ) != trim_offsets: SCREAMING_SNAKE_CASE : List[Any] = trim_offsets SCREAMING_SNAKE_CASE : Union[str, Any] = True if 
changes_to_apply: SCREAMING_SNAKE_CASE : List[str] = getattr(_lowerCamelCase , state.pop('''type''' ) ) SCREAMING_SNAKE_CASE : List[Any] = component_class(**_lowerCamelCase ) setattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def __lowerCAmelCase ( self ) ->str: if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[Any]: SCREAMING_SNAKE_CASE : str = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else value SCREAMING_SNAKE_CASE : List[Any] = value def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->BatchEncoding: SCREAMING_SNAKE_CASE : Tuple = kwargs.get('''is_split_into_words''' , _lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ '''to use it with pretokenized inputs.''' ) return super()._batch_encode_plus(*_lowerCamelCase , **_lowerCamelCase ) def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->BatchEncoding: SCREAMING_SNAKE_CASE : List[Any] = kwargs.get('''is_split_into_words''' , _lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ '''to use it with pretokenized inputs.''' ) return super()._encode_plus(*_lowerCamelCase , **_lowerCamelCase ) def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->Tuple[str]: SCREAMING_SNAKE_CASE : Any = self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase ) return tuple(_lowerCamelCase ) def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=None ) ->Any: SCREAMING_SNAKE_CASE : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->List[int]: SCREAMING_SNAKE_CASE : Any = [self.sep_token_id] SCREAMING_SNAKE_CASE : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = PaddingStrategy.DO_NOT_PAD , _lowerCamelCase = None , _lowerCamelCase = None , ) ->dict: SCREAMING_SNAKE_CASE : Tuple = super()._pad( encoded_inputs=_lowerCamelCase , max_length=_lowerCamelCase , padding_strategy=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: SCREAMING_SNAKE_CASE : Optional[Any] = '''attention_mask''' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: SCREAMING_SNAKE_CASE : int = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
SCREAMING_SNAKE_CASE : Tuple = len(encoded_inputs['''global_attention_mask'''] ) != len(_lowerCamelCase ) if needs_to_be_padded: SCREAMING_SNAKE_CASE : int = len(_lowerCamelCase ) - len(encoded_inputs['''global_attention_mask'''] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` SCREAMING_SNAKE_CASE : str = ( encoded_inputs['''global_attention_mask'''] + [-1] * difference ) elif self.padding_side == "left": SCREAMING_SNAKE_CASE : Optional[Any] = [-1] * difference + encoded_inputs[ '''global_attention_mask''' ] else: raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) ) return encoded_inputs
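A hedged sketch of the global-attention padding path above (network access to allenai/led-base-16384 assumed; -1 marks padded positions, as in _pad):

from transformers import LEDTokenizerFast

tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
enc = tok(["short text", "a somewhat longer piece of text"])
enc["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in enc["input_ids"]]
batch = tok.pad(enc, padding="longest")   # shorter rows get -1 appended by _pad
print(batch["global_attention_mask"])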
313
0
"""simple docstring""" def _A ( lowercase ): """simple docstring""" a =int(lowercase ) if n_element < 1: a =ValueError('''a should be a positive number''' ) raise my_error a =[1] a , a , a =(0, 0, 0) a =1 while index < n_element: while hamming_list[i] * 2 <= hamming_list[-1]: i += 1 while hamming_list[j] * 3 <= hamming_list[-1]: j += 1 while hamming_list[k] * 5 <= hamming_list[-1]: k += 1 hamming_list.append( min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) ) index += 1 return hamming_list if __name__ == "__main__": lowerCamelCase_ : List[str] = input("""Enter the last number (nth term) of the Hamming Number Series: """) print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""") lowerCamelCase_ : List[Any] = hamming(int(n)) print("""-----------------------------------------------------""") print(F'The list with nth numbers is: {hamming_numbers}') print("""-----------------------------------------------------""")
81
"""Strassen matrix multiplication (divide and conquer)."""
from __future__ import annotations

import math


def default_matrix_multiplication(a: list, b: list) -> list:
    """Plain multiplication, only for 2x2 matrices (the recursion base case)."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split a square matrix with even side length into four quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursive Strassen step: seven sub-multiplications instead of eight."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(top_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    # note: two square inputs short-circuit here and come back unchanged as a pair
    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
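A small non-square example (the wrapper short-circuits when both inputs are square, so rectangular shapes exercise the padding path); note that strassen pads its inputs in place. The expected product is verified by hand:

a = [[1, 2], [3, 4], [5, 6]]      # 3x2
b = [[7, 8, 9], [10, 11, 12]]     # 2x3
print(strassen(a, b))             # [[27, 30, 33], [61, 68, 75], [95, 106, 117]]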
313
0
import unittest

from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_torch_available():
    import torch

    from transformers import (
        MraForMaskedLM,
        MraForMultipleChoice,
        MraForQuestionAnswering,
        MraForSequenceClassification,
        MraForTokenClassification,
        MraModel,
    )
    from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST


class __lowerCAmelCase :
    def __init__( self , _snake_case , _snake_case=2 , _snake_case=8 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=16 , _snake_case=5 , _snake_case=2 , _snake_case=36 , _snake_case="gelu" , _snake_case=0.0 , _snake_case=0.0 , _snake_case=512 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , ):
        """simple docstring"""
        _lowerCAmelCase = parent
        _lowerCAmelCase = batch_size
        _lowerCAmelCase = seq_length
        _lowerCAmelCase = is_training
        _lowerCAmelCase = use_input_mask
        _lowerCAmelCase = use_token_type_ids
        _lowerCAmelCase = use_labels
        _lowerCAmelCase = vocab_size
        _lowerCAmelCase = hidden_size
        _lowerCAmelCase = num_hidden_layers
        _lowerCAmelCase = num_attention_heads
        _lowerCAmelCase = intermediate_size
        _lowerCAmelCase = hidden_act
        _lowerCAmelCase = hidden_dropout_prob
        _lowerCAmelCase = attention_probs_dropout_prob
        _lowerCAmelCase = max_position_embeddings
        _lowerCAmelCase = type_vocab_size
        _lowerCAmelCase = type_sequence_label_size
        _lowerCAmelCase = initializer_range
        _lowerCAmelCase = num_labels
        _lowerCAmelCase = num_choices
        _lowerCAmelCase = scope

    def snake_case ( self ):
        """simple docstring"""
        _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _lowerCAmelCase = None
        if self.use_input_mask:
            _lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
        _lowerCAmelCase = None
        if self.use_token_type_ids:
            _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _lowerCAmelCase = None
        _lowerCAmelCase = None
        _lowerCAmelCase = None
        if self.use_labels:
            _lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            _lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
        _lowerCAmelCase = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def snake_case ( self ):
        """simple docstring"""
        return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , )

    def snake_case ( self ):
        """simple docstring"""
        _lowerCAmelCase = self.get_config()
        _lowerCAmelCase = 300
        return config

    def snake_case ( self ):
        """simple docstring"""
        ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) = self.prepare_config_and_inputs()
        _lowerCAmelCase = True
        _lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
        """simple docstring"""
        _lowerCAmelCase = MraModel(config=_snake_case )
        model.to(_snake_case )
        model.eval()
        _lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case )
        _lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case )
        _lowerCAmelCase = model(_snake_case )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
        """simple docstring"""
        _lowerCAmelCase = True
        _lowerCAmelCase = MraModel(_snake_case )
        model.to(_snake_case )
        model.eval()
        _lowerCAmelCase = model( _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , )
        _lowerCAmelCase = model( _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , encoder_hidden_states=_snake_case , )
        _lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
        """simple docstring"""
        _lowerCAmelCase = MraForMaskedLM(config=_snake_case )
        model.to(_snake_case )
        model.eval()
        _lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
        """simple docstring"""
        _lowerCAmelCase = MraForQuestionAnswering(config=_snake_case )
        model.to(_snake_case )
        model.eval()
        _lowerCAmelCase = model( _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , start_positions=_snake_case , end_positions=_snake_case , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
        """simple docstring"""
        _lowerCAmelCase = self.num_labels
        _lowerCAmelCase = MraForSequenceClassification(_snake_case )
        model.to(_snake_case )
        model.eval()
        _lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
        """simple docstring"""
        _lowerCAmelCase = self.num_labels
        _lowerCAmelCase = MraForTokenClassification(config=_snake_case )
        model.to(_snake_case )
        model.eval()
        _lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
        """simple docstring"""
        _lowerCAmelCase = self.num_choices
        _lowerCAmelCase = MraForMultipleChoice(config=_snake_case )
        model.to(_snake_case )
        model.eval()
        _lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _lowerCAmelCase = model( _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def snake_case ( self ):
        """simple docstring"""
        _lowerCAmelCase = self.prepare_config_and_inputs()
        ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) = config_and_inputs
        _lowerCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict


@require_torch
class __lowerCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
    __lowerCamelCase = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    __lowerCamelCase = False
    __lowerCamelCase = False
    __lowerCamelCase = False
    __lowerCamelCase = False
    __lowerCamelCase = ()

    def snake_case ( self ):
        """simple docstring"""
        _lowerCAmelCase = MraModelTester(self )
        _lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 )

    def snake_case ( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def snake_case ( self ):
        """simple docstring"""
        _lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_snake_case )

    def snake_case ( self ):
        """simple docstring"""
        _lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            _lowerCAmelCase = type
            self.model_tester.create_and_check_model(*_snake_case )

    def snake_case ( self ):
        """simple docstring"""
        _lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*_snake_case )

    def snake_case ( self ):
        """simple docstring"""
        _lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*_snake_case )

    def snake_case ( self ):
        """simple docstring"""
        _lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*_snake_case )

    def snake_case ( self ):
        """simple docstring"""
        _lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*_snake_case )

    def snake_case ( self ):
        """simple docstring"""
        _lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*_snake_case )

    @slow
    def snake_case ( self ):
        """simple docstring"""
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCAmelCase = MraModel.from_pretrained(_snake_case )
            self.assertIsNotNone(_snake_case )

    @unittest.skip(reason="""MRA does not output attentions""" )
    def snake_case ( self ):
        """simple docstring"""
        return


@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
    @slow
    def snake_case ( self ):
        """simple docstring"""
        _lowerCAmelCase = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" )
        _lowerCAmelCase = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            _lowerCAmelCase = model(_snake_case )[0]
        _lowerCAmelCase = torch.Size((1, 256, 768) )
        self.assertEqual(output.shape , _snake_case )
        _lowerCAmelCase = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , _snake_case , atol=1e-4 ) )

    @slow
    def snake_case ( self ):
        """simple docstring"""
        _lowerCAmelCase = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" )
        _lowerCAmelCase = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            _lowerCAmelCase = model(_snake_case )[0]
        _lowerCAmelCase = 50265
        _lowerCAmelCase = torch.Size((1, 256, vocab_size) )
        self.assertEqual(output.shape , _snake_case )
        _lowerCAmelCase = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , _snake_case , atol=1e-4 ) )

    @slow
    def snake_case ( self ):
        """simple docstring"""
        _lowerCAmelCase = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" )
        _lowerCAmelCase = torch.arange(4096 ).unsqueeze(0 )
        with torch.no_grad():
            _lowerCAmelCase = model(_snake_case )[0]
        _lowerCAmelCase = 50265
        _lowerCAmelCase = torch.Size((1, 4096, vocab_size) )
        self.assertEqual(output.shape , _snake_case )
        _lowerCAmelCase = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , _snake_case , atol=1e-4 ) )
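# Running this suite follows the usual transformers test layout (the file path below is an
# assumption about where the MRA tests live); the @slow integration checks above are only
# executed when RUN_SLOW=1 is set in the environment:
#
#   RUN_SLOW=1 python -m pytest tests/models/mra/test_modeling_mra.py -v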
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState


warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')


class a_ :
    """simple docstring"""

    def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = True , _lowerCamelCase = False ) ->Any:
        SCREAMING_SNAKE_CASE : str = scheduler
        SCREAMING_SNAKE_CASE : List[str] = optimizers if isinstance(_lowerCamelCase , (list, tuple) ) else [optimizers]
        SCREAMING_SNAKE_CASE : Union[str, Any] = split_batches
        SCREAMING_SNAKE_CASE : List[Any] = step_with_optimizer
        SCREAMING_SNAKE_CASE : List[str] = GradientState()

    def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->Optional[Any]:
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*_lowerCamelCase , **_lowerCamelCase )
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*_lowerCamelCase , **_lowerCamelCase )
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            SCREAMING_SNAKE_CASE : List[str] = AcceleratorState().num_processes
            for _ in range(_lowerCamelCase ):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler , '''total_steps''' ):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*_lowerCamelCase , **_lowerCamelCase )
                else:
                    self.scheduler.step(*_lowerCamelCase , **_lowerCamelCase )

    def __lowerCAmelCase ( self ) ->Union[str, Any]:
        return self.scheduler.get_last_lr()

    def __lowerCAmelCase ( self ) ->List[str]:
        return self.scheduler.state_dict()

    def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
        self.scheduler.load_state_dict(_lowerCamelCase )

    def __lowerCAmelCase ( self ) ->Any:
        return self.scheduler.get_lr()

    def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->List[str]:
        return self.scheduler.print_lr(*_lowerCamelCase , **_lowerCamelCase )
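# A minimal usage sketch (assumption: the obfuscated class `a_` corresponds to accelerate's
# `AcceleratedScheduler`, whose public signature is
# (scheduler, optimizers, step_with_optimizer=True, split_batches=False)):
#
#   from accelerate.scheduler import AcceleratedScheduler
#   sched = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
#   wrapped = AcceleratedScheduler(sched, optimizer)
#   wrapped.step()  # silently skipped while gradients are still accumulating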
'''simple docstring'''
import argparse
import logging
import os
import sys

import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers

import transformers
from transformers import BartForConditionalGeneration, BartTokenizer


logging.basicConfig(
    format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    level=os.environ.get('LOGLEVEL', 'INFO').upper(),
    stream=sys.stdout,
)

snake_case_ : Any = logging.getLogger(__name__)

snake_case_ : Optional[Any] = {'facebook/bart-base': BartForConditionalGeneration}
snake_case_ : Optional[int] = {'facebook/bart-base': BartTokenizer}


def A__ ( ):
    _UpperCamelCase : List[Any] = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' )
    parser.add_argument(
        '--validation_file' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='A csv or a json file containing the validation data.' )
    parser.add_argument(
        '--max_length' , type=UpperCAmelCase_ , default=5 , help='The maximum total input sequence length after tokenization.' , )
    parser.add_argument(
        '--num_beams' ,
        type=UpperCAmelCase_ ,
        default=UpperCAmelCase_ ,
        help=(
            'Number of beams to use for evaluation. This argument will be '
            'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
        ) ,
    )
    parser.add_argument(
        '--model_name_or_path' , type=UpperCAmelCase_ , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=UpperCAmelCase_ , )
    parser.add_argument(
        '--config_name' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='Pretrained config name or path if not the same as model_name' , )
    parser.add_argument(
        '--device' , type=UpperCAmelCase_ , default='cpu' , help='Device where the model will be run' , )
    parser.add_argument('--output_file_path' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='Where to store the final ONNX file.' )
    _UpperCamelCase : Tuple = parser.parse_args()
    return args


def A__ ( UpperCAmelCase_ , UpperCAmelCase_="cpu" ):
    _UpperCamelCase : Any = model_dict[model_name].from_pretrained(UpperCAmelCase_ ).to(UpperCAmelCase_ )
    _UpperCamelCase : Union[str, Any] = tokenizer_dict[model_name].from_pretrained(UpperCAmelCase_ )
    if model_name in ["facebook/bart-base"]:
        _UpperCamelCase : Any = 0
        _UpperCamelCase : int = None
        _UpperCamelCase : int = 0
    return huggingface_model, tokenizer


def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
    model.eval()
    _UpperCamelCase : str = None
    _UpperCamelCase : str = torch.jit.script(BARTBeamSearchGenerator(UpperCAmelCase_ ) )
    with torch.no_grad():
        _UpperCamelCase : Optional[Any] = 'My friends are cool but they eat too many carbs.'
        _UpperCamelCase : Dict = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_0_2_4 , return_tensors='pt' ).to(model.device )
        _UpperCamelCase : List[Any] = model.generate(
            inputs['input_ids'] ,
            attention_mask=inputs['attention_mask'] ,
            num_beams=UpperCAmelCase_ ,
            max_length=UpperCAmelCase_ ,
            early_stopping=UpperCAmelCase_ ,
            decoder_start_token_id=model.config.decoder_start_token_id ,
        )
        torch.onnx.export(
            UpperCAmelCase_ ,
            (
                inputs['input_ids'],
                inputs['attention_mask'],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ) ,
            UpperCAmelCase_ ,
            opset_version=1_4 ,
            input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] ,
            output_names=['output_ids'] ,
            dynamic_axes={
                'input_ids': {0: 'batch', 1: 'seq'},
                'output_ids': {0: 'batch', 1: 'seq_out'},
            } ,
            example_outputs=UpperCAmelCase_ ,
        )
        logger.info('Model exported to {}'.format(UpperCAmelCase_ ) )
        _UpperCamelCase : List[Any] = remove_dup_initializers(os.path.abspath(UpperCAmelCase_ ) )
        logger.info('Deduplicated and optimized model written to {}'.format(UpperCAmelCase_ ) )
        _UpperCamelCase : Tuple = onnxruntime.InferenceSession(UpperCAmelCase_ )
        _UpperCamelCase : Dict = ort_sess.run(
            UpperCAmelCase_ ,
            {
                'input_ids': inputs['input_ids'].cpu().numpy(),
                'attention_mask': inputs['attention_mask'].cpu().numpy(),
                'num_beams': np.array(UpperCAmelCase_ ),
                'max_length': np.array(UpperCAmelCase_ ),
                'decoder_start_token_id': np.array(model.config.decoder_start_token_id ),
            } ,
        )
        np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
        logger.info('Model outputs from torch and ONNX Runtime are similar.' )
        logger.info('Success.' )


def A__ ( ):
    _UpperCamelCase : Any = parse_args()
    _UpperCamelCase : Optional[int] = 5
    _UpperCamelCase : List[str] = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' ,
        datefmt='%m/%d/%Y %H:%M:%S' ,
        level=logging.INFO ,
    )
    logger.setLevel(logging.INFO )
    transformers.utils.logging.set_verbosity_error()
    _UpperCamelCase : Union[str, Any] = torch.device(args.device )
    _UpperCamelCase , _UpperCamelCase : Tuple = load_model_tokenizer(args.model_name_or_path , UpperCAmelCase_ )
    if model.config.decoder_start_token_id is None:
        raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' )
    model.to(UpperCAmelCase_ )
    if args.max_length:
        _UpperCamelCase : Optional[int] = args.max_length
    if args.num_beams:
        _UpperCamelCase : Any = args.num_beams
    if args.output_file_path:
        _UpperCamelCase : str = args.output_file_path
    else:
        _UpperCamelCase : List[Any] = 'BART.onnx'
    logger.info('Exporting model to ONNX' )
    export_and_validate_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )


if __name__ == "__main__":
    main()
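# A hedged CLI sketch: the flags come from the parse_args function above (where the
# obfuscated `type=UpperCAmelCase_` placeholders stand in for `str`/`None`); the script
# name itself is an assumption:
#
#   python run_onnx_exporter.py \
#       --model_name_or_path facebook/bart-base \
#       --device cpu \
#       --output_file_path bart_beam_search.onnx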
import argparse
import os
from pathlib import Path
from typing import Dict

import tensorflow as tf
import torch
from tqdm import tqdm

from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params


a__ : Optional[Any] = [
    # replace left string with right string to get the relevant state_dict key (identical state dict to bart)
    ['''memory_attention''', '''encoder_attn'''],
    ['''attention''', '''attn'''],
    ['''/''', '''.'''],
    ['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
    ['''.LayerNorm.beta''', '''_layer_norm.bias'''],
    ['''r.layer_''', '''r.layers.'''],
    ['''output_proj''', '''out_proj'''],
    ['''ffn.dense_1.''', '''fc2.'''],
    ['''ffn.dense.''', '''fc1.'''],
    ['''ffn_layer_norm''', '''final_layer_norm'''],
    ['''kernel''', '''weight'''],
    ['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
    ['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
    ['''embeddings.weights''', '''shared.weight'''],
]


def UpperCAmelCase_( a__ ):
    """simple docstring"""
    for pegasus_name, hf_name in PATTERNS:
        SCREAMING_SNAKE_CASE : Union[str, Any] = k.replace(a__ , a__ )
    return k


def UpperCAmelCase_( a__ , a__ ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Tuple = DEFAULTS.copy()
    cfg_kwargs.update(a__ )
    SCREAMING_SNAKE_CASE : Union[str, Any] = PegasusConfig(**a__ )
    SCREAMING_SNAKE_CASE : Optional[int] = PegasusForConditionalGeneration(a__ )
    SCREAMING_SNAKE_CASE : Dict = torch_model.model.state_dict()
    SCREAMING_SNAKE_CASE : List[str] = {}
    for k, v in tf_weights.items():
        SCREAMING_SNAKE_CASE : int = rename_state_dict_key(a__ )
        if new_k not in sd:
            raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if "dense" in k or "proj" in new_k:
            SCREAMING_SNAKE_CASE : Dict = v.T
        SCREAMING_SNAKE_CASE : Tuple = torch.tensor(a__ , dtype=sd[new_k].dtype )
        assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
    # make sure embedding.padding_idx is respected
    SCREAMING_SNAKE_CASE : Tuple = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] )
    SCREAMING_SNAKE_CASE : int = mapping['''shared.weight''']
    SCREAMING_SNAKE_CASE : Union[str, Any] = mapping['''shared.weight''']
    SCREAMING_SNAKE_CASE : Optional[Any] = {k: torch.zeros_like(a__ ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping}
    mapping.update(**a__ )
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = torch_model.model.load_state_dict(a__ , strict=a__ )
    SCREAMING_SNAKE_CASE : Optional[Any] = [
        k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight''']
    ]
    assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
    assert extra == [], F"""no matches found for the following tf keys {extra}"""
    return torch_model


def UpperCAmelCase_( a__="./ckpt/aeslc/model.ckpt-32000" ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : str = tf.train.list_variables(a__ )
    SCREAMING_SNAKE_CASE : str = {}
    SCREAMING_SNAKE_CASE : List[Any] = ['''Adafactor''', '''global_step''']
    for name, shape in tqdm(a__ , desc='''converting tf checkpoint to dict''' ):
        SCREAMING_SNAKE_CASE : Union[str, Any] = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        SCREAMING_SNAKE_CASE : Dict = tf.train.load_variable(a__ , a__ )
        SCREAMING_SNAKE_CASE : Any = array
    return tf_weights


def UpperCAmelCase_( a__ , a__ ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : List[str] = Path(a__ ).parent.name
    SCREAMING_SNAKE_CASE : Union[str, Any] = task_specific_params[F"""summarization_{dataset}"""]['''max_position_embeddings''']
    SCREAMING_SNAKE_CASE : Dict = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' , model_max_length=a__ )
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(a__ )
    # convert model
    SCREAMING_SNAKE_CASE : Any = get_tf_weights_as_numpy(a__ )
    SCREAMING_SNAKE_CASE : List[str] = task_specific_params[F"""summarization_{dataset}"""]
    if dataset == "large":
        SCREAMING_SNAKE_CASE : int = task_specific_params
    SCREAMING_SNAKE_CASE : List[str] = convert_pegasus(a__ , a__ )
    torch_model.save_pretrained(a__ )
    SCREAMING_SNAKE_CASE : Union[str, Any] = torch_model.state_dict()
    sd.pop('''model.decoder.embed_positions.weight''' )
    sd.pop('''model.encoder.embed_positions.weight''' )
    torch.save(a__ , Path(a__ ) / '''pytorch_model.bin''' )


if __name__ == "__main__":
    a__ : Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
    parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
    a__ : List[str] = parser.parse_args()
    if args.save_dir is None:
        a__ : Any = Path(args.tf_ckpt_path).parent.name
        a__ : int = os.path.join('''pegasus''', dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
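# A hedged CLI sketch: both positional arguments come from the argparse block above; the
# script name is an assumption based on the transformers Pegasus conversion utility:
#
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc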
"""simple docstring""" import argparse import os import shutil from pathlib import Path import onnx import torch from packaging import version from torch.onnx import export from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline __UpperCAmelCase = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11') def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : tuple , lowercase__ : Path , lowercase__ : int , lowercase__ : int , lowercase__ : Optional[int] , lowercase__ : Dict , lowercase__ : Tuple=False , ) -> Tuple: '''simple docstring''' output_path.parent.mkdir(parents=lowercase__ , exist_ok=lowercase__ ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( lowercase__ , lowercase__ , f=output_path.as_posix() , input_names=lowercase__ , output_names=lowercase__ , dynamic_axes=lowercase__ , do_constant_folding=lowercase__ , use_external_data_format=lowercase__ , enable_onnx_checker=lowercase__ , opset_version=lowercase__ , ) else: export( lowercase__ , lowercase__ , f=output_path.as_posix() , input_names=lowercase__ , output_names=lowercase__ , dynamic_axes=lowercase__ , do_constant_folding=lowercase__ , opset_version=lowercase__ , ) @torch.no_grad() def _snake_case ( lowercase__ : str , lowercase__ : str , lowercase__ : int , lowercase__ : bool = False ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :List[Any] = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): lowerCAmelCase_ :List[str] = """cuda""" elif fpaa and not torch.cuda.is_available(): raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" ) else: lowerCAmelCase_ :int = """cpu""" lowerCAmelCase_ :Union[str, Any] = StableDiffusionPipeline.from_pretrained(lowercase__ , torch_dtype=lowercase__ ).to(lowercase__ ) lowerCAmelCase_ :Dict = Path(lowercase__ ) # TEXT ENCODER lowerCAmelCase_ :Any = pipeline.text_encoder.config.max_position_embeddings lowerCAmelCase_ :Tuple = pipeline.text_encoder.config.hidden_size lowerCAmelCase_ :Optional[Any] = pipeline.tokenizer( """A sample prompt""" , padding="""max_length""" , max_length=pipeline.tokenizer.model_max_length , truncation=lowercase__ , return_tensors="""pt""" , ) onnx_export( pipeline.text_encoder , model_args=(text_input.input_ids.to(device=lowercase__ , dtype=torch.intaa )) , output_path=output_path / """text_encoder""" / """model.onnx""" , ordered_input_names=["""input_ids"""] , output_names=["""last_hidden_state""", """pooler_output"""] , dynamic_axes={ """input_ids""": {0: """batch""", 1: """sequence"""}, } , opset=lowercase__ , ) del pipeline.text_encoder # UNET lowerCAmelCase_ :Dict = pipeline.unet.config.in_channels lowerCAmelCase_ :List[str] = pipeline.unet.config.sample_size lowerCAmelCase_ :str = output_path / """unet""" / """model.onnx""" onnx_export( pipeline.unet , model_args=( torch.randn(2 , lowercase__ , lowercase__ , lowercase__ ).to(device=lowercase__ , dtype=lowercase__ ), torch.randn(2 ).to(device=lowercase__ , dtype=lowercase__ ), torch.randn(2 , lowercase__ , lowercase__ ).to(device=lowercase__ , dtype=lowercase__ ), False, ) , output_path=lowercase__ , ordered_input_names=["""sample""", """timestep""", """encoder_hidden_states""", """return_dict"""] , output_names=["""out_sample"""] , dynamic_axes={ """sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""}, 
"""timestep""": {0: """batch"""}, """encoder_hidden_states""": {0: """batch""", 1: """sequence"""}, } , opset=lowercase__ , use_external_data_format=lowercase__ , ) lowerCAmelCase_ :Any = str(unet_path.absolute().as_posix() ) lowerCAmelCase_ :Dict = os.path.dirname(lowercase__ ) lowerCAmelCase_ :Optional[Any] = onnx.load(lowercase__ ) # clean up existing tensor files shutil.rmtree(lowercase__ ) os.mkdir(lowercase__ ) # collate external tensor files into one onnx.save_model( lowercase__ , lowercase__ , save_as_external_data=lowercase__ , all_tensors_to_one_file=lowercase__ , location="""weights.pb""" , convert_attribute=lowercase__ , ) del pipeline.unet # VAE ENCODER lowerCAmelCase_ :str = pipeline.vae lowerCAmelCase_ :Dict = vae_encoder.config.in_channels lowerCAmelCase_ :Dict = vae_encoder.config.sample_size # need to get the raw tensor output (sample) from the encoder lowerCAmelCase_ :Optional[int] = lambda lowercase__ , lowercase__ : vae_encoder.encode(lowercase__ , lowercase__ )[0].sample() onnx_export( lowercase__ , model_args=( torch.randn(1 , lowercase__ , lowercase__ , lowercase__ ).to(device=lowercase__ , dtype=lowercase__ ), False, ) , output_path=output_path / """vae_encoder""" / """model.onnx""" , ordered_input_names=["""sample""", """return_dict"""] , output_names=["""latent_sample"""] , dynamic_axes={ """sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""}, } , opset=lowercase__ , ) # VAE DECODER lowerCAmelCase_ :Dict = pipeline.vae lowerCAmelCase_ :Optional[int] = vae_decoder.config.latent_channels lowerCAmelCase_ :List[str] = vae_decoder.config.out_channels # forward only through the decoder part lowerCAmelCase_ :List[str] = vae_encoder.decode onnx_export( lowercase__ , model_args=( torch.randn(1 , lowercase__ , lowercase__ , lowercase__ ).to(device=lowercase__ , dtype=lowercase__ ), False, ) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={ """latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""}, } , opset=lowercase__ , ) del pipeline.vae # SAFETY CHECKER if pipeline.safety_checker is not None: lowerCAmelCase_ :Dict = pipeline.safety_checker lowerCAmelCase_ :Dict = safety_checker.config.vision_config.num_channels lowerCAmelCase_ :Any = safety_checker.config.vision_config.image_size lowerCAmelCase_ :Any = safety_checker.forward_onnx onnx_export( pipeline.safety_checker , model_args=( torch.randn( 1 , lowercase__ , lowercase__ , lowercase__ , ).to(device=lowercase__ , dtype=lowercase__ ), torch.randn(1 , lowercase__ , lowercase__ , lowercase__ ).to(device=lowercase__ , dtype=lowercase__ ), ) , output_path=output_path / """safety_checker""" / """model.onnx""" , ordered_input_names=["""clip_input""", """images"""] , output_names=["""out_images""", """has_nsfw_concepts"""] , dynamic_axes={ """clip_input""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""}, """images""": {0: """batch""", 1: """height""", 2: """width""", 3: """channels"""}, } , opset=lowercase__ , ) del pipeline.safety_checker lowerCAmelCase_ :List[Any] = OnnxRuntimeModel.from_pretrained(output_path / """safety_checker""" ) lowerCAmelCase_ :Tuple = pipeline.feature_extractor else: lowerCAmelCase_ :str = None lowerCAmelCase_ :str = None lowerCAmelCase_ :Optional[int] = OnnxStableDiffusionPipeline( vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_encoder""" ) , 
vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_decoder""" ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / """text_encoder""" ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / """unet""" ) , scheduler=pipeline.scheduler , safety_checker=lowercase__ , feature_extractor=lowercase__ , requires_safety_checker=safety_checker is not None , ) onnx_pipeline.save_pretrained(lowercase__ ) print("""ONNX pipeline saved to""" , lowercase__ ) del pipeline del onnx_pipeline lowerCAmelCase_ :List[str] = OnnxStableDiffusionPipeline.from_pretrained(lowercase__ , provider="""CPUExecutionProvider""" ) print("""ONNX pipeline is loadable""" ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument( '--model_path', type=str, required=True, help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).', ) parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.') parser.add_argument( '--opset', default=14, type=int, help='The version of the ONNX operator set to use.', ) parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode') __UpperCAmelCase = parser.parse_args() convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
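# A hedged CLI sketch: all flags come from the argparse block above; the script name is an
# assumption based on the diffusers conversion script this appears to be:
#
#   python convert_stable_diffusion_checkpoint_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 \
#       --output_path ./sd_onnx \
#       --opset 14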
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class a_ ( a__ , unittest.TestCase ):
    """simple docstring"""

    __SCREAMING_SNAKE_CASE : str = KandinskyImgaImgPipeline
    __SCREAMING_SNAKE_CASE : str = ['prompt', 'image_embeds', 'negative_image_embeds', 'image']
    __SCREAMING_SNAKE_CASE : int = [
        'prompt',
        'negative_prompt',
        'image_embeds',
        'negative_image_embeds',
        'image',
    ]
    __SCREAMING_SNAKE_CASE : int = [
        'generator',
        'height',
        'width',
        'strength',
        'guidance_scale',
        'negative_prompt',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    __SCREAMING_SNAKE_CASE : List[Any] = False

    @property
    def __lowerCAmelCase ( self ) ->int:
        return 32

    @property
    def __lowerCAmelCase ( self ) ->List[str]:
        return 32

    @property
    def __lowerCAmelCase ( self ) ->Optional[int]:
        return self.time_input_dim

    @property
    def __lowerCAmelCase ( self ) ->Tuple:
        return self.time_input_dim * 4

    @property
    def __lowerCAmelCase ( self ) ->Optional[int]:
        return 100

    @property
    def __lowerCAmelCase ( self ) ->Dict:
        SCREAMING_SNAKE_CASE : str = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
        return tokenizer

    @property
    def __lowerCAmelCase ( self ) ->Tuple:
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE : List[Any] = MCLIPConfig(
            numDims=self.cross_attention_dim ,
            transformerDimensions=self.text_embedder_hidden_size ,
            hidden_size=self.text_embedder_hidden_size ,
            intermediate_size=37 ,
            num_attention_heads=4 ,
            num_hidden_layers=5 ,
            vocab_size=1005 ,
        )
        SCREAMING_SNAKE_CASE : Dict = MultilingualCLIP(_lowerCamelCase )
        SCREAMING_SNAKE_CASE : str = text_encoder.eval()
        return text_encoder

    @property
    def __lowerCAmelCase ( self ) ->Union[str, Any]:
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE : Any = {
            '''in_channels''': 4,
            # Out channels is double in channels because predicts mean and variance
            '''out_channels''': 8,
            '''addition_embed_type''': '''text_image''',
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
            '''layers_per_block''': 1,
            '''encoder_hid_dim''': self.text_embedder_hidden_size,
            '''encoder_hid_dim_type''': '''text_image_proj''',
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': None,
        }
        SCREAMING_SNAKE_CASE : Tuple = UNetaDConditionModel(**_lowerCamelCase )
        return model

    @property
    def __lowerCAmelCase ( self ) ->List[str]:
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def __lowerCAmelCase ( self ) ->Optional[Any]:
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE : List[str] = VQModel(**self.dummy_movq_kwargs )
        return model

    def __lowerCAmelCase ( self ) ->Dict:
        SCREAMING_SNAKE_CASE : Any = self.dummy_text_encoder
        SCREAMING_SNAKE_CASE : Any = self.dummy_tokenizer
        SCREAMING_SNAKE_CASE : List[Any] = self.dummy_unet
        SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_movq
        SCREAMING_SNAKE_CASE : Optional[Any] = {
            '''num_train_timesteps''': 1000,
            '''beta_schedule''': '''linear''',
            '''beta_start''': 0.0_0_0_8_5,
            '''beta_end''': 0.0_1_2,
            '''clip_sample''': False,
            '''set_alpha_to_one''': False,
            '''steps_offset''': 0,
            '''prediction_type''': '''epsilon''',
            '''thresholding''': False,
        }
        SCREAMING_SNAKE_CASE : Optional[Any] = DDIMScheduler(**_lowerCamelCase )
        SCREAMING_SNAKE_CASE : List[Any] = {
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components

    def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->str:
        SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
        SCREAMING_SNAKE_CASE : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowerCamelCase )
        # create init_image
        SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
        SCREAMING_SNAKE_CASE : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        SCREAMING_SNAKE_CASE : str = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
        if str(_lowerCamelCase ).startswith('''mps''' ):
            SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(_lowerCamelCase )
        else:
            SCREAMING_SNAKE_CASE : str = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
        SCREAMING_SNAKE_CASE : Any = {
            '''prompt''': '''horse''',
            '''image''': init_image,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''num_inference_steps''': 10,
            '''guidance_scale''': 7.0,
            '''strength''': 0.2,
            '''output_type''': '''np''',
        }
        return inputs

    def __lowerCAmelCase ( self ) ->Dict:
        SCREAMING_SNAKE_CASE : str = '''cpu'''
        SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
        SCREAMING_SNAKE_CASE : List[str] = self.pipeline_class(**_lowerCamelCase )
        SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(_lowerCamelCase )
        pipe.set_progress_bar_config(disable=_lowerCamelCase )
        SCREAMING_SNAKE_CASE : Optional[int] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) )
        SCREAMING_SNAKE_CASE : Dict = output.images
        SCREAMING_SNAKE_CASE : Any = pipe(
            **self.get_dummy_inputs(_lowerCamelCase ) ,
            return_dict=_lowerCamelCase ,
        )[0]
        SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1]
        SCREAMING_SNAKE_CASE : List[Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        SCREAMING_SNAKE_CASE : Optional[int] = np.array(
            [0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""


@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
    """simple docstring"""

    def __lowerCAmelCase ( self ) ->List[Any]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __lowerCAmelCase ( self ) ->Optional[int]:
        SCREAMING_SNAKE_CASE : int = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinsky/kandinsky_img2img_frog.npy''' )
        SCREAMING_SNAKE_CASE : int = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinsky/cat.png''' )
        SCREAMING_SNAKE_CASE : str = '''A red cartoon frog, 4k'''
        SCREAMING_SNAKE_CASE : Any = KandinskyPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
        pipe_prior.to(_lowerCamelCase )
        SCREAMING_SNAKE_CASE : List[str] = KandinskyImgaImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
        SCREAMING_SNAKE_CASE : str = pipeline.to(_lowerCamelCase )
        pipeline.set_progress_bar_config(disable=_lowerCamelCase )
        SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_prior(
            _lowerCamelCase ,
            generator=_lowerCamelCase ,
            num_inference_steps=5 ,
            negative_prompt='''''' ,
        ).to_tuple()
        SCREAMING_SNAKE_CASE : Dict = pipeline(
            _lowerCamelCase ,
            image=_lowerCamelCase ,
            image_embeds=_lowerCamelCase ,
            negative_image_embeds=_lowerCamelCase ,
            generator=_lowerCamelCase ,
            num_inference_steps=100 ,
            height=768 ,
            width=768 ,
            strength=0.2 ,
            output_type='''np''' ,
        )
        SCREAMING_SNAKE_CASE : Tuple = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
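# A short end-to-end sketch of what the slow test above exercises (model ids and arguments
# are taken from the test itself; `KandinskyImgaImgPipeline` is the obfuscated spelling of
# diffusers' KandinskyImg2ImgPipeline, and `init_image` is assumed to be a PIL image):
#
#   prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
#   pipe = KandinskyImgaImgPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
#   image_embeds, negative_embeds = prior("A red cartoon frog, 4k", negative_prompt="").to_tuple()
#   frog = pipe("A red cartoon frog, 4k", image=init_image, image_embeds=image_embeds,
#               negative_image_embeds=negative_embeds, strength=0.2).images[0]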
'''simple docstring'''
import inspect
import unittest

from datasets import load_dataset
from packaging import version

from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_MAPPING,
        BeitForImageClassification,
        BeitForMaskedImageModeling,
        BeitForSemanticSegmentation,
        BeitModel,
    )
    from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    import PIL
    from PIL import Image

    from transformers import BeitImageProcessor


class _snake_case :
    def __init__( self , a__ , a__=100 , a__=13 , a__=30 , a__=2 , a__=3 , a__=True , a__=True , a__=32 , a__=4 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=10 , a__=0.0_2 , a__=3 , a__=None , a__=[0, 1, 2, 3] , ) -> Tuple:
        '''simple docstring'''
        snake_case_ = parent
        snake_case_ = 100
        snake_case_ = batch_size
        snake_case_ = image_size
        snake_case_ = patch_size
        snake_case_ = num_channels
        snake_case_ = is_training
        snake_case_ = use_labels
        snake_case_ = hidden_size
        snake_case_ = num_hidden_layers
        snake_case_ = num_attention_heads
        snake_case_ = intermediate_size
        snake_case_ = hidden_act
        snake_case_ = hidden_dropout_prob
        snake_case_ = attention_probs_dropout_prob
        snake_case_ = type_sequence_label_size
        snake_case_ = initializer_range
        snake_case_ = scope
        snake_case_ = out_indices
        snake_case_ = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        snake_case_ = (image_size // patch_size) ** 2
        snake_case_ = num_patches + 1

    def lowerCAmelCase__ ( self ) -> Optional[int]:
        '''simple docstring'''
        snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        snake_case_ = None
        snake_case_ = None
        if self.use_labels:
            snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            snake_case_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        snake_case_ = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def lowerCAmelCase__ ( self ) -> Optional[int]:
        '''simple docstring'''
        return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )

    def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ ) -> Any:
        '''simple docstring'''
        snake_case_ = BeitModel(config=a__ )
        model.to(a__ )
        model.eval()
        snake_case_ = model(a__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ ) -> Any:
        '''simple docstring'''
        snake_case_ = BeitForMaskedImageModeling(config=a__ )
        model.to(a__ )
        model.eval()
        snake_case_ = model(a__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )

    def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ ) -> str:
        '''simple docstring'''
        snake_case_ = self.type_sequence_label_size
        snake_case_ = BeitForImageClassification(a__ )
        model.to(a__ )
        model.eval()
        snake_case_ = model(a__ , labels=a__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        snake_case_ = 1
        snake_case_ = BeitForImageClassification(a__ )
        model.to(a__ )
        model.eval()
        snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        snake_case_ = model(a__ , labels=a__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ ) -> List[str]:
        '''simple docstring'''
        snake_case_ = self.num_labels
        snake_case_ = BeitForSemanticSegmentation(a__ )
        model.to(a__ )
        model.eval()
        snake_case_ = model(a__ )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
        snake_case_ = model(a__ , labels=a__ )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )

    def lowerCAmelCase__ ( self ) -> List[str]:
        '''simple docstring'''
        snake_case_ = self.prepare_config_and_inputs()
        snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs
        snake_case_ = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class _snake_case ( lowercase_ , lowercase_ , unittest.TestCase ):
    lowerCAmelCase_ : int = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    lowerCAmelCase_ : Optional[Any] = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    lowerCAmelCase_ : List[str] = False
    lowerCAmelCase_ : Optional[int] = False
    lowerCAmelCase_ : Tuple = False

    def lowerCAmelCase__ ( self ) -> Tuple:
        '''simple docstring'''
        snake_case_ = BeitModelTester(self )
        snake_case_ = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )

    def lowerCAmelCase__ ( self ) -> Optional[Any]:
        '''simple docstring'''
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds" )
    def lowerCAmelCase__ ( self ) -> Dict:
        '''simple docstring'''
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
    def lowerCAmelCase__ ( self ) -> int:
        '''simple docstring'''
        pass

    def lowerCAmelCase__ ( self ) -> Dict:
        '''simple docstring'''
        snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case_ = model_class(a__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            snake_case_ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(a__ , nn.Linear ) )

    def lowerCAmelCase__ ( self ) -> Union[str, Any]:
        '''simple docstring'''
        snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case_ = model_class(a__ )
            snake_case_ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case_ = [*signature.parameters.keys()]
            snake_case_ = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , a__ )

    def lowerCAmelCase__ ( self ) -> List[str]:
        '''simple docstring'''
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*a__ )

    def lowerCAmelCase__ ( self ) -> Dict:
        '''simple docstring'''
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*a__ )

    def lowerCAmelCase__ ( self ) -> List[Any]:
        '''simple docstring'''
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*a__ )

    def lowerCAmelCase__ ( self ) -> Any:
        '''simple docstring'''
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*a__ )

    def lowerCAmelCase__ ( self ) -> Tuple:
        '''simple docstring'''
        if not self.model_tester.is_training:
            return
        snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case_ = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(a__ ), BeitForMaskedImageModeling]:
                continue
            snake_case_ = model_class(a__ )
            model.to(a__ )
            model.train()
            snake_case_ = self._prepare_for_class(a__ , a__ , return_labels=a__ )
            snake_case_ = model(**a__ ).loss
            loss.backward()

    def lowerCAmelCase__ ( self ) -> Optional[int]:
        '''simple docstring'''
        snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        snake_case_ = False
        snake_case_ = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(a__ ), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            snake_case_ = model_class(a__ )
            model.gradient_checkpointing_enable()
            model.to(a__ )
            model.train()
            snake_case_ = self._prepare_for_class(a__ , a__ , return_labels=a__ )
            snake_case_ = model(**a__ ).loss
            loss.backward()

    def lowerCAmelCase__ ( self ) -> List[Any]:
        '''simple docstring'''
        snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case_ = _config_zero_init(a__ )
        for model_class in self.all_model_classes:
            snake_case_ = model_class(config=a__ )
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() ,
                        [0.0, 1.0] ,
                        msg=F'Parameter {name} of model {model_class} seems not properly initialized' ,
                    )

    @slow
    def lowerCAmelCase__ ( self ) -> Optional[int]:
        '''simple docstring'''
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case_ = BeitModel.from_pretrained(a__ )
            self.assertIsNotNone(a__ )


def UpperCamelCase_( ):
    '''simple docstring'''
    snake_case_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image


@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
    @cached_property
    def lowerCAmelCase__ ( self ) -> Union[str, Any]:
        '''simple docstring'''
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None

    @slow
    def lowerCAmelCase__ ( self ) -> Tuple:
        '''simple docstring'''
        snake_case_ = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(a__ )
        snake_case_ = self.default_image_processor
        snake_case_ = prepare_img()
        snake_case_ = image_processor(images=a__ , return_tensors="pt" ).pixel_values.to(a__ )
        # prepare bool_masked_pos
        snake_case_ = torch.ones((1, 196) , dtype=torch.bool ).to(a__ )
        # forward pass
        with torch.no_grad():
            snake_case_ = model(pixel_values=a__ , bool_masked_pos=a__ )
        snake_case_ = outputs.logits
        # verify the logits
        snake_case_ = torch.Size((1, 196, 8_192) )
        self.assertEqual(logits.shape , a__ )
        snake_case_ = torch.tensor(
            [[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] ).to(a__ )
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , a__ , atol=1e-2 ) )

    @slow
    def lowerCAmelCase__ ( self ) -> Dict:
        '''simple docstring'''
        snake_case_ = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(a__ )
        snake_case_ = self.default_image_processor
        snake_case_ = prepare_img()
        snake_case_ = image_processor(images=a__ , return_tensors="pt" ).to(a__ )
        # forward pass
        with torch.no_grad():
            snake_case_ = model(**a__ )
        snake_case_ = outputs.logits
        # verify the logits
        snake_case_ = torch.Size((1, 1_000) )
        self.assertEqual(logits.shape , a__ )
        snake_case_ = torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(a__ )
        self.assertTrue(torch.allclose(logits[0, :3] , a__ , atol=1e-4 ) )
        snake_case_ = 281
        self.assertEqual(logits.argmax(-1 ).item() , a__ )

    @slow
    def lowerCAmelCase__ ( self ) -> List[str]:
        '''simple docstring'''
        snake_case_ = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
            a__ )
        snake_case_ = self.default_image_processor
        snake_case_ = prepare_img()
        snake_case_ = image_processor(images=a__ , return_tensors="pt" ).to(a__ )
        # forward pass
        with torch.no_grad():
            snake_case_ = model(**a__ )
        snake_case_ = outputs.logits
        # verify the logits
        snake_case_ = torch.Size((1, 21_841) )
        self.assertEqual(logits.shape , a__ )
        snake_case_ = torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(a__ )
        self.assertTrue(torch.allclose(logits[0, :3] , a__ , atol=1e-4 ) )
        snake_case_ = 2_396
        self.assertEqual(logits.argmax(-1 ).item() , a__ )

    @slow
    def lowerCAmelCase__ ( self ) -> int:
        '''simple docstring'''
        snake_case_ = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
        snake_case_ = model.to(a__ )
        snake_case_ = BeitImageProcessor(do_resize=a__ , size=640 , do_center_crop=a__ )
        snake_case_ = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
        snake_case_ = Image.open(ds[0]["file"] )
        snake_case_ = image_processor(images=a__ , return_tensors="pt" ).to(a__ )
        # forward pass
        with torch.no_grad():
            snake_case_ = model(**a__ )
        snake_case_ = outputs.logits
        # verify the logits
        snake_case_ = torch.Size((1, 150, 160, 160) )
        self.assertEqual(logits.shape , a__ )
        snake_case_ = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
        if is_pillow_less_than_a:
            snake_case_ = torch.tensor(
                [
                    [[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]],
                    [[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]],
                    [[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]],
                ] ,
                device=a__ ,
            )
        else:
            snake_case_ = torch.tensor(
                [
                    [[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]],
                    [[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]],
                    [[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]],
                ] ,
                device=a__ ,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , a__ , atol=1e-4 ) )

    @slow
    def lowerCAmelCase__ ( self ) -> Tuple:
        '''simple docstring'''
        snake_case_ = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
        snake_case_ = model.to(a__ )
        snake_case_ = BeitImageProcessor(do_resize=a__ , size=640 , do_center_crop=a__ )
        snake_case_ = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
        snake_case_ = Image.open(ds[0]["file"] )
        snake_case_ = image_processor(images=a__ , return_tensors="pt" ).to(a__ )
        # forward pass
        with torch.no_grad():
            snake_case_ = model(**a__ )
        snake_case_ = outputs.logits.detach().cpu()
        snake_case_ = image_processor.post_process_semantic_segmentation(outputs=a__ , target_sizes=[(500, 300)] )
        snake_case_ = torch.Size((500, 300) )
        self.assertEqual(segmentation[0].shape , a__ )
        snake_case_ = image_processor.post_process_semantic_segmentation(outputs=a__ )
        snake_case_ = torch.Size((160, 160) )
        self.assertEqual(segmentation[0].shape , a__ )
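# A short usage sketch of the post-processing exercised in the last test (names are taken
# from the test itself; treat it as illustrative rather than canonical):
#
#   seg = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
#   seg[0].shape  # torch.Size([500, 300]); without target_sizes the maps stay at logits resolution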
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def UpperCAmelCase_( a__ , a__=False ): """simple docstring""" SCREAMING_SNAKE_CASE : int = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ('''module.cls_token''', '''vit.embeddings.cls_token'''), ('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''), ('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''), ('''module.pos_embed''', '''vit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''module.norm.weight''', '''layernorm.weight'''), ('''module.norm.bias''', '''layernorm.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" SCREAMING_SNAKE_CASE : Any = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def UpperCAmelCase_( a__ , a__ , a__=False ): """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: SCREAMING_SNAKE_CASE : Any = '''''' else: SCREAMING_SNAKE_CASE : Optional[int] = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) SCREAMING_SNAKE_CASE : Optional[Any] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" ) SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE : List[str] = in_proj_weight[ : config.hidden_size, : ] SCREAMING_SNAKE_CASE : Any = in_proj_bias[: config.hidden_size] SCREAMING_SNAKE_CASE : int = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] SCREAMING_SNAKE_CASE : List[Any] = in_proj_bias[ 
config.hidden_size : config.hidden_size * 2 ] SCREAMING_SNAKE_CASE : List[str] = in_proj_weight[ -config.hidden_size :, : ] SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias[-config.hidden_size :] def UpperCAmelCase_( a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(a__ , a__ ) def UpperCAmelCase_( a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = [ '''module.fc.fc1.weight''', '''module.fc.fc1.bias''', '''module.fc.bn1.weight''', '''module.fc.bn1.bias''', '''module.fc.bn1.running_mean''', '''module.fc.bn1.running_var''', '''module.fc.bn1.num_batches_tracked''', '''module.fc.fc2.weight''', '''module.fc.fc2.bias''', '''module.fc.bn2.weight''', '''module.fc.bn2.bias''', '''module.fc.bn2.running_mean''', '''module.fc.bn2.running_var''', '''module.fc.bn2.num_batches_tracked''', '''module.fc.fc3.weight''', '''module.fc.fc3.bias''', ] for k in ignore_keys: state_dict.pop(a__ , a__ ) def UpperCAmelCase_( a__ , a__ , a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : int = dct.pop(a__ ) SCREAMING_SNAKE_CASE : Optional[Any] = val def UpperCAmelCase_( a__ , a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = ViTMSNConfig() SCREAMING_SNAKE_CASE : Optional[int] = 1_000 SCREAMING_SNAKE_CASE : str = '''datasets/huggingface/label-files''' SCREAMING_SNAKE_CASE : List[str] = '''imagenet-1k-id2label.json''' SCREAMING_SNAKE_CASE : Union[str, Any] = json.load(open(hf_hub_download(a__ , a__ ) , '''r''' ) ) SCREAMING_SNAKE_CASE : List[Any] = {int(a__ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : str = idalabel SCREAMING_SNAKE_CASE : int = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: SCREAMING_SNAKE_CASE : Tuple = 384 SCREAMING_SNAKE_CASE : Any = 1_536 SCREAMING_SNAKE_CASE : List[str] = 6 elif "l16" in checkpoint_url: SCREAMING_SNAKE_CASE : Optional[int] = 1_024 SCREAMING_SNAKE_CASE : Optional[int] = 4_096 SCREAMING_SNAKE_CASE : Tuple = 24 SCREAMING_SNAKE_CASE : Union[str, Any] = 16 SCREAMING_SNAKE_CASE : Dict = 0.1 elif "b4" in checkpoint_url: SCREAMING_SNAKE_CASE : str = 4 elif "l7" in checkpoint_url: SCREAMING_SNAKE_CASE : Union[str, Any] = 7 SCREAMING_SNAKE_CASE : Union[str, Any] = 1_024 SCREAMING_SNAKE_CASE : List[Any] = 4_096 SCREAMING_SNAKE_CASE : List[Any] = 24 SCREAMING_SNAKE_CASE : Tuple = 16 SCREAMING_SNAKE_CASE : Union[str, Any] = 0.1 SCREAMING_SNAKE_CASE : Union[str, Any] = ViTMSNModel(a__ ) SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(a__ , map_location='''cpu''' )['''target_encoder'''] SCREAMING_SNAKE_CASE : Any = ViTImageProcessor(size=config.image_size ) remove_projection_head(a__ ) SCREAMING_SNAKE_CASE : Any = create_rename_keys(a__ , base_model=a__ ) for src, dest in rename_keys: rename_key(a__ , a__ , a__ ) read_in_q_k_v(a__ , a__ , base_model=a__ ) model.load_state_dict(a__ ) model.eval() SCREAMING_SNAKE_CASE : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg''' SCREAMING_SNAKE_CASE : Dict = Image.open(requests.get(a__ , stream=a__ ).raw ) SCREAMING_SNAKE_CASE : Optional[int] = ViTImageProcessor( size=config.image_size , image_mean=a__ , image_std=a__ ) SCREAMING_SNAKE_CASE : int = image_processor(images=a__ , return_tensors='''pt''' ) # forward pass torch.manual_seed(2 ) SCREAMING_SNAKE_CASE : Tuple = model(**a__ ) SCREAMING_SNAKE_CASE : str = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # 
https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] ) elif "b16" in checkpoint_url: SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] ) elif "l16" in checkpoint_url: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] ) elif "b4" in checkpoint_url: SCREAMING_SNAKE_CASE : str = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] ) else: SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3] , a__ , atol=1e-4 ) print(F"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(a__ ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(a__ ) if __name__ == "__main__": a__ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''', type=str, help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) a__ : Any = parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
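# Editorial note: a hypothetical invocation of the conversion script above.
# The script filename and output path are placeholders; the checkpoint URL is
# the argparse default taken from the file itself:
#
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small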
313
0
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar lowerCamelCase__ = TypeVar("""T""") class A__ ( Generic[T]): def __init__( self , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : List[Any] = data __lowerCAmelCase : Node[T] | None = None def __str__( self ): return f"{self.data}" class A__ ( Generic[T]): def __init__( self ): __lowerCAmelCase : Node[T] | None = None def __iter__( self ): __lowerCAmelCase : Tuple = self.top while node: yield node.data __lowerCAmelCase : Optional[int] = node.next def __str__( self ): return "->".join([str(_SCREAMING_SNAKE_CASE ) for item in self] ) def __len__( self ): return len(tuple(iter(self ) ) ) def __lowerCamelCase ( self ): return self.top is None def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : int = Node(_SCREAMING_SNAKE_CASE ) if not self.is_empty(): __lowerCAmelCase : Union[str, Any] = self.top __lowerCAmelCase : Optional[int] = node def __lowerCamelCase ( self ): if self.is_empty(): raise IndexError('pop from empty stack' ) assert isinstance(self.top , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = self.top __lowerCAmelCase : int = self.top.next return pop_node.data def __lowerCamelCase ( self ): if self.is_empty(): raise IndexError('peek from empty stack' ) assert self.top is not None return self.top.data def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[int] = None if __name__ == "__main__": from doctest import testmod testmod()
86
import csv

import tweepy

# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count;
    # the v1.1 API only exposes roughly the most recent 3200 tweets overall)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
313
0
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''', # See all Cvt models at https://huggingface.co/models?filter=cvt } class snake_case_ ( __A ): __A : str = "cvt" def __init__( self : Dict , lowercase_ : int=3 , lowercase_ : Optional[Any]=[7, 3, 3] , lowercase_ : Dict=[4, 2, 2] , lowercase_ : List[Any]=[2, 1, 1] , lowercase_ : Union[str, Any]=[64, 1_92, 3_84] , lowercase_ : Union[str, Any]=[1, 3, 6] , lowercase_ : Optional[Any]=[1, 2, 10] , lowercase_ : Tuple=[4.0, 4.0, 4.0] , lowercase_ : Tuple=[0.0, 0.0, 0.0] , lowercase_ : str=[0.0, 0.0, 0.0] , lowercase_ : Union[str, Any]=[0.0, 0.0, 0.1] , lowercase_ : int=[True, True, True] , lowercase_ : str=[False, False, True] , lowercase_ : Dict=["dw_bn", "dw_bn", "dw_bn"] , lowercase_ : Any=[3, 3, 3] , lowercase_ : Dict=[1, 1, 1] , lowercase_ : Any=[2, 2, 2] , lowercase_ : List[str]=[1, 1, 1] , lowercase_ : Any=[1, 1, 1] , lowercase_ : List[Any]=0.02 , lowercase_ : Optional[int]=1E-12 , **lowercase_ : Any , ) -> List[Any]: super().__init__(**lowercase_ ) lowercase__ : Tuple = num_channels lowercase__ : Optional[Any] = patch_sizes lowercase__ : Optional[int] = patch_stride lowercase__ : Tuple = patch_padding lowercase__ : Dict = embed_dim lowercase__ : str = num_heads lowercase__ : Dict = depth lowercase__ : List[str] = mlp_ratio lowercase__ : Any = attention_drop_rate lowercase__ : Union[str, Any] = drop_rate lowercase__ : Optional[int] = drop_path_rate lowercase__ : Any = qkv_bias lowercase__ : Optional[Any] = cls_token lowercase__ : Optional[Any] = qkv_projection_method lowercase__ : Optional[Any] = kernel_qkv lowercase__ : Optional[int] = padding_kv lowercase__ : Union[str, Any] = stride_kv lowercase__ : str = padding_q lowercase__ : Optional[Any] = stride_q lowercase__ : Tuple = initializer_range lowercase__ : str = layer_norm_eps
87
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a__ : Optional[Any] = logging.get_logger(__name__) a__ : List[str] = { '''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''', '''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''', '''kssteven/ibert-roberta-large-mnli''': ( '''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json''' ), } class a_ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = 'ibert' def __init__( self , _lowerCamelCase=3_0522 , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=2 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1e-12 , _lowerCamelCase=1 , _lowerCamelCase=0 , _lowerCamelCase=2 , _lowerCamelCase="absolute" , _lowerCamelCase=False , _lowerCamelCase="none" , **_lowerCamelCase , ) ->Any: super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size SCREAMING_SNAKE_CASE : str = hidden_size SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads SCREAMING_SNAKE_CASE : Dict = hidden_act SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings SCREAMING_SNAKE_CASE : Tuple = type_vocab_size SCREAMING_SNAKE_CASE : Optional[int] = initializer_range SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps SCREAMING_SNAKE_CASE : str = position_embedding_type SCREAMING_SNAKE_CASE : Optional[int] = quant_mode SCREAMING_SNAKE_CASE : Dict = force_dequant class a_ ( a__ ): """simple docstring""" @property def __lowerCAmelCase ( self ) ->Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": SCREAMING_SNAKE_CASE : Dict = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
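# Editorial sketch: the config class above is a plain attribute container, so
# instantiating it with a subset of kwargs keeps the remaining defaults from
# the signature (`IBertConfig` is the public name of this class in
# transformers; the asserted values mirror the defaults shown above).
from transformers import IBertConfig

config = IBertConfig(quant_mode=True)
assert config.hidden_size == 768  # default from the signature above
assert config.quant_mode is True  # overridden value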
313
0
def stooge_sort(arr: list) -> list:
    """Sort a list in place with stooge sort and return it."""
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return

    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3

        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)

        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)

        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
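# Editorial usage sketch for stooge_sort above; the triple recursion over
# overlapping 2/3 slices gives a runtime of O(n^(log 3 / log 1.5)) ≈ O(n^2.71).
assert stooge_sort([2, 4, 5, 3, 1]) == [1, 2, 3, 4, 5]
assert stooge_sort([]) == []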
88
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType a__ : Any = logging.get_logger(__name__) a__ : Dict = { '''openai/imagegpt-small''': '''''', '''openai/imagegpt-medium''': '''''', '''openai/imagegpt-large''': '''''', } class a_ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = 'imagegpt' __SCREAMING_SNAKE_CASE : Optional[Any] = ['past_key_values'] __SCREAMING_SNAKE_CASE : Union[str, Any] = { 'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , _lowerCamelCase=512 + 1 , _lowerCamelCase=32 * 32 , _lowerCamelCase=512 , _lowerCamelCase=24 , _lowerCamelCase=8 , _lowerCamelCase=None , _lowerCamelCase="quick_gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=1e-5 , _lowerCamelCase=0.0_2 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=False , **_lowerCamelCase , ) ->str: SCREAMING_SNAKE_CASE : Any = vocab_size SCREAMING_SNAKE_CASE : Optional[int] = n_positions SCREAMING_SNAKE_CASE : Optional[int] = n_embd SCREAMING_SNAKE_CASE : List[Any] = n_layer SCREAMING_SNAKE_CASE : List[Any] = n_head SCREAMING_SNAKE_CASE : int = n_inner SCREAMING_SNAKE_CASE : Dict = activation_function SCREAMING_SNAKE_CASE : Union[str, Any] = resid_pdrop SCREAMING_SNAKE_CASE : Dict = embd_pdrop SCREAMING_SNAKE_CASE : List[str] = attn_pdrop SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_epsilon SCREAMING_SNAKE_CASE : Tuple = initializer_range SCREAMING_SNAKE_CASE : int = scale_attn_weights SCREAMING_SNAKE_CASE : Optional[int] = use_cache SCREAMING_SNAKE_CASE : Optional[Any] = scale_attn_by_inverse_layer_idx SCREAMING_SNAKE_CASE : str = reorder_and_upcast_attn SCREAMING_SNAKE_CASE : List[str] = tie_word_embeddings super().__init__(tie_word_embeddings=_lowerCamelCase , **_lowerCamelCase ) class a_ ( a__ ): """simple docstring""" @property def __lowerCAmelCase ( self ) ->Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''sequence'''}), ] ) def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = 3 , _lowerCamelCase = 32 , _lowerCamelCase = 32 , ) ->Mapping[str, Any]: SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_images(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Tuple = dict(preprocessor(images=_lowerCamelCase , return_tensors=_lowerCamelCase ) ) return inputs
313
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = { '''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''', # See all Cvt models at https://huggingface.co/models?filter=cvt } class __magic_name__ ( _UpperCamelCase ): lowerCAmelCase : str = 'cvt' def __init__( self : Union[str, Any] ,_UpperCAmelCase : Optional[Any]=3 ,_UpperCAmelCase : List[str]=[7, 3, 3] ,_UpperCAmelCase : List[str]=[4, 2, 2] ,_UpperCAmelCase : Optional[Any]=[2, 1, 1] ,_UpperCAmelCase : List[str]=[64, 192, 384] ,_UpperCAmelCase : Tuple=[1, 3, 6] ,_UpperCAmelCase : Optional[Any]=[1, 2, 10] ,_UpperCAmelCase : Optional[int]=[4.0, 4.0, 4.0] ,_UpperCAmelCase : Optional[Any]=[0.0, 0.0, 0.0] ,_UpperCAmelCase : List[str]=[0.0, 0.0, 0.0] ,_UpperCAmelCase : List[str]=[0.0, 0.0, 0.1] ,_UpperCAmelCase : Dict=[True, True, True] ,_UpperCAmelCase : Dict=[False, False, True] ,_UpperCAmelCase : str=["dw_bn", "dw_bn", "dw_bn"] ,_UpperCAmelCase : int=[3, 3, 3] ,_UpperCAmelCase : Optional[int]=[1, 1, 1] ,_UpperCAmelCase : List[str]=[2, 2, 2] ,_UpperCAmelCase : List[str]=[1, 1, 1] ,_UpperCAmelCase : Optional[Any]=[1, 1, 1] ,_UpperCAmelCase : Union[str, Any]=0.02 ,_UpperCAmelCase : int=1E-12 ,**_UpperCAmelCase : Union[str, Any] ,): super().__init__(**_UpperCAmelCase ) _a : Any = num_channels _a : List[Any] = patch_sizes _a : int = patch_stride _a : List[Any] = patch_padding _a : Any = embed_dim _a : List[str] = num_heads _a : Tuple = depth _a : int = mlp_ratio _a : List[Any] = attention_drop_rate _a : Optional[int] = drop_rate _a : str = drop_path_rate _a : Tuple = qkv_bias _a : str = cls_token _a : Optional[int] = qkv_projection_method _a : Any = kernel_qkv _a : Any = padding_kv _a : str = stride_kv _a : Tuple = padding_q _a : List[str] = stride_q _a : List[str] = initializer_range _a : Dict = layer_norm_eps
89
from maths.prime_check import is_prime


def twin_prime(number: int) -> int:
    """Return number + 2 if (number, number + 2) is a twin-prime pair, else -1."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
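# Editorial usage sketch: (5, 7) is a twin-prime pair, while 9 is composite.
assert twin_prime(5) == 7
assert twin_prime(9) == -1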
313
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_torch_available,
    is_vision_available,
)

_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
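# Design note (editorial): the `_LazyModule` indirection above defers the heavy
# torch/flax imports until an exported name is first accessed, so importing the
# package stays cheap when the modeling code is never used. A simplified sketch
# of the idea (the real implementation lives in `transformers.utils`; this is
# an illustration, not that code):
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol: str):
        # only called for attributes not found the normal way
        if symbol not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {symbol!r}")
        submodule = importlib.import_module(f".{self._symbol_to_module[symbol]}", self.__name__)
        return getattr(submodule, symbol)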
90
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaPriorEmbaEmbPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class a_ ( a__ , unittest.TestCase ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = KandinskyVaaControlnetImgaImgPipeline __SCREAMING_SNAKE_CASE : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint'] __SCREAMING_SNAKE_CASE : List[Any] = ['image_embeds', 'negative_image_embeds', 'image', 'hint'] __SCREAMING_SNAKE_CASE : List[str] = [ 'generator', 'height', 'width', 'strength', 'guidance_scale', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] __SCREAMING_SNAKE_CASE : List[Any] = False @property def __lowerCAmelCase ( self ) ->Optional[Any]: return 32 @property def __lowerCAmelCase ( self ) ->Optional[int]: return 32 @property def __lowerCAmelCase ( self ) ->str: return self.time_input_dim @property def __lowerCAmelCase ( self ) ->Dict: return self.time_input_dim * 4 @property def __lowerCAmelCase ( self ) ->Tuple: return 100 @property def __lowerCAmelCase ( self ) ->int: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Union[str, Any] = { '''in_channels''': 8, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image_hint''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } SCREAMING_SNAKE_CASE : List[str] = UNetaDConditionModel(**_lowerCamelCase ) return model @property def __lowerCAmelCase ( self ) ->Any: return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __lowerCAmelCase ( self ) ->Tuple: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[int] = VQModel(**self.dummy_movq_kwargs ) return model def __lowerCAmelCase ( self ) ->List[str]: SCREAMING_SNAKE_CASE : str = self.dummy_unet SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_movq SCREAMING_SNAKE_CASE : List[str] = { '''num_train_timesteps''': 1000, '''beta_schedule''': '''linear''', '''beta_start''': 0.0_0_0_8_5, '''beta_end''': 0.0_1_2, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', '''thresholding''': False, } 
SCREAMING_SNAKE_CASE : str = DDIMScheduler(**_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = { '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->int: SCREAMING_SNAKE_CASE : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( _lowerCamelCase ) # create init_image SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) SCREAMING_SNAKE_CASE : int = image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE : Dict = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) ) # create hint SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) if str(_lowerCamelCase ).startswith('''mps''' ): SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(_lowerCamelCase ) else: SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Dict = { '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''hint''': hint, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def __lowerCAmelCase ( self ) ->Dict: SCREAMING_SNAKE_CASE : Optional[int] = '''cpu''' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**_lowerCamelCase ) SCREAMING_SNAKE_CASE : int = pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : str = output.images SCREAMING_SNAKE_CASE : Any = pipe( **self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0] SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) SCREAMING_SNAKE_CASE : Union[str, Any] = np.array( [0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class a_ ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self ) ->Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) ->Optional[int]: SCREAMING_SNAKE_CASE : int = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' ) SCREAMING_SNAKE_CASE : int = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) SCREAMING_SNAKE_CASE : Optional[Any] = init_image.resize((512, 512) ) 
SCREAMING_SNAKE_CASE : Dict = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/hint_image_cat.png''' ) SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(np.array(_lowerCamelCase ) ).float() / 2_5_5.0 SCREAMING_SNAKE_CASE : int = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) SCREAMING_SNAKE_CASE : List[Any] = '''A robot, 4k photo''' SCREAMING_SNAKE_CASE : List[str] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Tuple = KandinskyVaaControlnetImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : Any = pipeline.to(_lowerCamelCase ) pipeline.set_progress_bar_config(disable=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_prior( _lowerCamelCase , image=_lowerCamelCase , strength=0.8_5 , generator=_lowerCamelCase , negative_prompt='''''' , ).to_tuple() SCREAMING_SNAKE_CASE : List[str] = pipeline( image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , hint=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , ) SCREAMING_SNAKE_CASE : Any = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
313
0
"""simple docstring""" # limitations under the License. # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 from .utils import deprecate deprecate( """pipelines_utils""", """0.22.0""", """Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""", standard_warn=False, stacklevel=3, )
91
from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker a__ : List[str] = '''CompVis/stable-diffusion-v1-1''' a__ : Optional[Any] = '''CompVis/stable-diffusion-v1-2''' a__ : Any = '''CompVis/stable-diffusion-v1-3''' a__ : Optional[Any] = '''CompVis/stable-diffusion-v1-4''' class a_ ( a__ ): """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = True , ) ->str: super()._init_() SCREAMING_SNAKE_CASE : Tuple = StableDiffusionPipeline.from_pretrained(_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionPipeline.from_pretrained(_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = StableDiffusionPipeline.from_pretrained(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Dict = StableDiffusionPipeline( vae=_lowerCamelCase , text_encoder=_lowerCamelCase , tokenizer=_lowerCamelCase , unet=_lowerCamelCase , scheduler=_lowerCamelCase , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , requires_safety_checker=_lowerCamelCase , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def __lowerCAmelCase ( self ) ->Dict[str, Any]: return {k: getattr(self , _lowerCamelCase ) for k in self.config.keys() if not k.startswith('''_''' )} def __lowerCAmelCase ( self , _lowerCamelCase = "auto" ) ->Optional[int]: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory SCREAMING_SNAKE_CASE : Optional[int] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(_lowerCamelCase ) def __lowerCAmelCase ( self ) ->List[str]: self.enable_attention_slicing(_lowerCamelCase ) @torch.no_grad() def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->str: return self.pipea( prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) @torch.no_grad() def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Tuple: return self.pipea( prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , 
num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) @torch.no_grad() def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Dict: return self.pipea( prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) @torch.no_grad() def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Optional[Any]: return self.pipea( prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) @torch.no_grad() def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Dict: SCREAMING_SNAKE_CASE : Optional[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu''' self.to(_lowerCamelCase ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" ) # Get first result from Stable Diffusion Checkpoint v1.1 SCREAMING_SNAKE_CASE : Optional[Any] = self.textaimg_sda_a( prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) # Get first result from Stable Diffusion Checkpoint v1.2 SCREAMING_SNAKE_CASE : Any = self.textaimg_sda_a( prompt=_lowerCamelCase , height=_lowerCamelCase , 
width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) # Get first result from Stable Diffusion Checkpoint v1.3 SCREAMING_SNAKE_CASE : Optional[int] = self.textaimg_sda_a( prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) # Get first result from Stable Diffusion Checkpoint v1.4 SCREAMING_SNAKE_CASE : str = self.textaimg_sda_a( prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
313
0
import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_config_file""": """tokenizer_config.json""", } UpperCamelCase__ = { """vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""}, """merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""}, """tokenizer_config_file""": { """facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json""" }, } UpperCamelCase__ = {"""facebook/blenderbot-3B""": 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def _a ( ): __lowerCAmelCase = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) __lowerCAmelCase = bs[:] __lowerCAmelCase = 0 for b in range(2**8 ): if b not in bs: bs.append(SCREAMING_SNAKE_CASE_ ) cs.append(2**8 + n ) n += 1 __lowerCAmelCase = [chr(SCREAMING_SNAKE_CASE_ ) for n in cs] return dict(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) def _a ( SCREAMING_SNAKE_CASE_ : Optional[Any] ): __lowerCAmelCase = set() __lowerCAmelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __lowerCAmelCase = char return pairs class a__ ( snake_case__ ): _a : str = VOCAB_FILES_NAMES _a : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP _a : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a : Union[str, Any] = ["""input_ids""", """attention_mask"""] def __init__( self , _A , _A , _A="replace" , _A="<s>" , _A="</s>" , _A="</s>" , _A="<s>" , _A="<unk>" , _A="<pad>" , _A="<mask>" , _A=False , **_A , ): """simple docstring""" __lowerCAmelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else bos_token __lowerCAmelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else eos_token __lowerCAmelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else sep_token __lowerCAmelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else cls_token __lowerCAmelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else unk_token __lowerCAmelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it __lowerCAmelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token super().__init__( errors=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , **_A , ) with open(_A , encoding="utf-8" ) as vocab_handle: __lowerCAmelCase = json.load(_A ) __lowerCAmelCase = {v: k for k, v in self.encoder.items()} __lowerCAmelCase = errors # how to handle errors in decoding __lowerCAmelCase = bytes_to_unicode() __lowerCAmelCase = {v: k for k, v in self.byte_encoder.items()} with open(_A , encoding="utf-8" ) as merges_handle: __lowerCAmelCase = merges_handle.read().split("\n" )[1:-1] __lowerCAmelCase = [tuple(merge.split() ) for merge in bpe_merges] __lowerCAmelCase = dict(zip(_A , range(len(_A ) ) ) ) __lowerCAmelCase = {} __lowerCAmelCase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __lowerCAmelCase = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return len(self.encoder ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" if token in self.cache: return self.cache[token] __lowerCAmelCase = tuple(_A ) __lowerCAmelCase = get_pairs(_A ) if not pairs: return token while True: __lowerCAmelCase = min(_A , key=lambda _A : self.bpe_ranks.get(_A , float("inf" ) ) ) if bigram not in self.bpe_ranks: break __lowerCAmelCase , __lowerCAmelCase = bigram __lowerCAmelCase = [] __lowerCAmelCase = 0 while i < len(_A ): try: __lowerCAmelCase = word.index(_A , _A ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __lowerCAmelCase = j if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __lowerCAmelCase = tuple(_A ) __lowerCAmelCase = new_word if len(_A ) == 1: break else: __lowerCAmelCase = get_pairs(_A ) __lowerCAmelCase = " ".join(_A ) __lowerCAmelCase = word return word def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" __lowerCAmelCase = [] for token in re.findall(self.pat , _A ): __lowerCAmelCase = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_A ).split(" " ) ) return bpe_tokens def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" return self.encoder.get(_A , self.encoder.get(self.unk_token ) ) def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" return self.decoder.get(_A ) def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" __lowerCAmelCase = "".join(_A ) __lowerCAmelCase = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def __SCREAMING_SNAKE_CASE( self , _A , _A = None ): """simple docstring""" if not os.path.isdir(_A ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowerCAmelCase = os.path.join( _A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) __lowerCAmelCase = 
os.path.join( _A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(_A , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_A , ensure_ascii=_A ) + "\n" ) __lowerCAmelCase = 0 with open(_A , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _A : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" " Please check that the tokenizer is not corrupted!" ) __lowerCAmelCase = token_index writer.write(" ".join(_A ) + "\n" ) index += 1 return vocab_file, merge_file def __SCREAMING_SNAKE_CASE( self , _A , _A = None , _A = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A ) if token_ids_a is None: return [1] + ([0] * len(_A )) + [1] return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1] def __SCREAMING_SNAKE_CASE( self , _A , _A = None ): """simple docstring""" __lowerCAmelCase = [self.sep_token_id] __lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __SCREAMING_SNAKE_CASE( self , _A , _A=False , **_A ): """simple docstring""" __lowerCAmelCase = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(_A ) > 0 and not text[0].isspace()): __lowerCAmelCase = " " + text return (text, kwargs) def __SCREAMING_SNAKE_CASE( self , _A , _A = None ): """simple docstring""" return token_ids_a + [self.eos_token_id] def __SCREAMING_SNAKE_CASE( self , _A ): """simple docstring""" __lowerCAmelCase = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(" " + text ) else: # Generated responses should contain them already. inputs.append(_A ) __lowerCAmelCase = " ".join(_A ) __lowerCAmelCase = self.encode(_A ) if len(_A ) > self.model_max_length: __lowerCAmelCase = input_ids[-self.model_max_length :] logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" ) return input_ids
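# Editorial sketch of the property the byte encoder above provides. The
# obfuscated `_a` helper is, per its copy note, `bytes_to_unicode` from the
# RoBERTa tokenizer: a reversible byte -> printable-character map covering all
# 256 byte values, so byte-level BPE can treat arbitrary UTF-8 text as a string
# of "visible" symbols with no whitespace or control characters.
table = bytes_to_unicode()
assert len(table) == 256

word = "héllo".encode("utf-8")
printable = "".join(table[b] for b in word)

reverse = {char: byte for byte, char in table.items()}
assert bytes(reverse[char] for char in printable) == word  # lossless round-trip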
92
from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxCrossAttnUpBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, FlaxUpBlockaD, ) @flax.struct.dataclass class a_ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE : jnp.ndarray @flax_register_to_config class a_ ( nn.Module , a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = 32 __SCREAMING_SNAKE_CASE : int = 4 __SCREAMING_SNAKE_CASE : int = 4 __SCREAMING_SNAKE_CASE : Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) __SCREAMING_SNAKE_CASE : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") __SCREAMING_SNAKE_CASE : Union[bool, Tuple[bool]] = False __SCREAMING_SNAKE_CASE : Tuple[int] = (320, 640, 1280, 1280) __SCREAMING_SNAKE_CASE : int = 2 __SCREAMING_SNAKE_CASE : Union[int, Tuple[int]] = 8 __SCREAMING_SNAKE_CASE : Optional[Union[int, Tuple[int]]] = None __SCREAMING_SNAKE_CASE : int = 1280 __SCREAMING_SNAKE_CASE : float = 0.0 __SCREAMING_SNAKE_CASE : bool = False __SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa __SCREAMING_SNAKE_CASE : bool = True __SCREAMING_SNAKE_CASE : int = 0 __SCREAMING_SNAKE_CASE : bool = False def __lowerCAmelCase ( self , _lowerCamelCase ) ->FrozenDict: # init input tensors SCREAMING_SNAKE_CASE : List[Any] = (1, self.in_channels, self.sample_size, self.sample_size) SCREAMING_SNAKE_CASE : List[Any] = jnp.zeros(_lowerCamelCase , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE : Tuple = jnp.ones((1,) , dtype=jnp.intaa ) SCREAMING_SNAKE_CASE : List[Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = jax.random.split(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Tuple = {'''params''': params_rng, '''dropout''': dropout_rng} return self.init(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )["params"] def __lowerCAmelCase ( self ) ->int: SCREAMING_SNAKE_CASE : List[str] = self.block_out_channels SCREAMING_SNAKE_CASE : Optional[int] = block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( '''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. 
SCREAMING_SNAKE_CASE : List[str] = self.num_attention_heads or self.attention_head_dim # input SCREAMING_SNAKE_CASE : Optional[int] = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time SCREAMING_SNAKE_CASE : Tuple = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) SCREAMING_SNAKE_CASE : Dict = FlaxTimestepEmbedding(_lowerCamelCase , dtype=self.dtype ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.only_cross_attention if isinstance(_lowerCamelCase , _lowerCamelCase ): SCREAMING_SNAKE_CASE : str = (only_cross_attention,) * len(self.down_block_types ) if isinstance(_lowerCamelCase , _lowerCamelCase ): SCREAMING_SNAKE_CASE : str = (num_attention_heads,) * len(self.down_block_types ) # down SCREAMING_SNAKE_CASE : Optional[int] = [] SCREAMING_SNAKE_CASE : Optional[Any] = block_out_channels[0] for i, down_block_type in enumerate(self.down_block_types ): SCREAMING_SNAKE_CASE : str = output_channel SCREAMING_SNAKE_CASE : int = block_out_channels[i] SCREAMING_SNAKE_CASE : List[Any] = i == len(_lowerCamelCase ) - 1 if down_block_type == "CrossAttnDownBlock2D": SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxCrossAttnDownBlockaD( in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxDownBlockaD( in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = down_blocks # mid SCREAMING_SNAKE_CASE : int = FlaxUNetMidBlockaDCrossAttn( in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) # up SCREAMING_SNAKE_CASE : str = [] SCREAMING_SNAKE_CASE : str = list(reversed(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : int = list(reversed(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : int = list(reversed(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : List[str] = reversed_block_out_channels[0] for i, up_block_type in enumerate(self.up_block_types ): SCREAMING_SNAKE_CASE : Dict = output_channel SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i] SCREAMING_SNAKE_CASE : Tuple = reversed_block_out_channels[min(i + 1 , len(_lowerCamelCase ) - 1 )] SCREAMING_SNAKE_CASE : Dict = i == len(_lowerCamelCase ) - 1 if up_block_type == "CrossAttnUpBlock2D": SCREAMING_SNAKE_CASE : str = FlaxCrossAttnUpBlockaD( in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: SCREAMING_SNAKE_CASE : Optional[int] = FlaxUpBlockaD( in_channels=_lowerCamelCase , 
out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , ) up_blocks.append(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Dict = output_channel SCREAMING_SNAKE_CASE : Tuple = up_blocks # out SCREAMING_SNAKE_CASE : Any = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) SCREAMING_SNAKE_CASE : Any = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase = True , _lowerCamelCase = False , ) ->Union[FlaxUNetaDConditionOutput, Tuple]: # 1. time if not isinstance(_lowerCamelCase , jnp.ndarray ): SCREAMING_SNAKE_CASE : int = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(_lowerCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0: SCREAMING_SNAKE_CASE : List[str] = timesteps.astype(dtype=jnp.floataa ) SCREAMING_SNAKE_CASE : Optional[Any] = jnp.expand_dims(_lowerCamelCase , 0 ) SCREAMING_SNAKE_CASE : List[str] = self.time_proj(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = self.time_embedding(_lowerCamelCase ) # 2. pre-process SCREAMING_SNAKE_CASE : int = jnp.transpose(_lowerCamelCase , (0, 2, 3, 1) ) SCREAMING_SNAKE_CASE : List[Any] = self.conv_in(_lowerCamelCase ) # 3. down SCREAMING_SNAKE_CASE : Optional[int] = (sample,) for down_block in self.down_blocks: if isinstance(_lowerCamelCase , _lowerCamelCase ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = down_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train ) else: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = down_block(_lowerCamelCase , _lowerCamelCase , deterministic=not train ) down_block_res_samples += res_samples if down_block_additional_residuals is not None: SCREAMING_SNAKE_CASE : int = () for down_block_res_sample, down_block_additional_residual in zip( _lowerCamelCase , _lowerCamelCase ): down_block_res_sample += down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) SCREAMING_SNAKE_CASE : Dict = new_down_block_res_samples # 4. mid SCREAMING_SNAKE_CASE : Optional[Any] = self.mid_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train ) if mid_block_additional_residual is not None: sample += mid_block_additional_residual # 5. up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE : Optional[Any] = down_block_res_samples[-(self.layers_per_block + 1) :] SCREAMING_SNAKE_CASE : Optional[int] = down_block_res_samples[: -(self.layers_per_block + 1)] if isinstance(_lowerCamelCase , _lowerCamelCase ): SCREAMING_SNAKE_CASE : str = up_block( _lowerCamelCase , temb=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train , ) else: SCREAMING_SNAKE_CASE : Optional[int] = up_block(_lowerCamelCase , temb=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train ) # 6. post-process SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = nn.silu(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Dict = self.conv_out(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = jnp.transpose(_lowerCamelCase , (0, 3, 1, 2) ) if not return_dict: return (sample,) return FlaxUNetaDConditionOutput(sample=_lowerCamelCase )
313
0
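The Flax UNet row above builds its time conditioning from a FlaxTimesteps projection followed by an MLP embedding. A minimal NumPy sketch of the sinusoidal projection idea (the function name is illustrative, and the flip_sin_to_cos/freq_shift options of the real module are omitted):

import numpy as np

def sinusoidal_timestep_embedding(timesteps, dim, max_period=10000.0):
    # Standard transformer-style scheme: half the channels carry sin,
    # half carry cos, over log-spaced frequencies.
    half = dim // 2
    freqs = np.exp(-np.log(max_period) * np.arange(half) / half)
    args = np.asarray(timesteps, dtype=np.float32)[:, None] * freqs[None, :]
    return np.concatenate([np.sin(args), np.cos(args)], axis=-1)

emb = sinusoidal_timestep_embedding(np.array([0, 10, 999]), dim=32)
print(emb.shape)  # (3, 32)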
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class lowerCAmelCase__ ( unittest.TestCase ): def _snake_case ( self ): """simple docstring""" lowercase_ : str = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] ) lowercase_ : Union[str, Any] = get_activation('''gelu''' ) self.assertTrue(torch.allclose(gelu_python(__SCREAMING_SNAKE_CASE ) , torch_builtin(__SCREAMING_SNAKE_CASE ) ) ) self.assertFalse(torch.allclose(gelu_python(__SCREAMING_SNAKE_CASE ) , gelu_new(__SCREAMING_SNAKE_CASE ) ) ) def _snake_case ( self ): """simple docstring""" lowercase_ : Optional[Any] = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] ) lowercase_ : Tuple = get_activation('''gelu''' ) lowercase_ : Any = get_activation('''gelu_10''' ) lowercase_ : List[str] = torch_builtin(__SCREAMING_SNAKE_CASE ) lowercase_ : Union[str, Any] = geluaa(__SCREAMING_SNAKE_CASE ) lowercase_ : Dict = torch.where(y_gelu_aa < 10.0 , 1 , 0 ) self.assertTrue(torch.max(__SCREAMING_SNAKE_CASE ).item() == 10.0 ) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) ) def _snake_case ( self ): """simple docstring""" get_activation('''gelu''' ) get_activation('''gelu_10''' ) get_activation('''gelu_fast''' ) get_activation('''gelu_new''' ) get_activation('''gelu_python''' ) get_activation('''gelu_pytorch_tanh''' ) get_activation('''linear''' ) get_activation('''mish''' ) get_activation('''quick_gelu''' ) get_activation('''relu''' ) get_activation('''sigmoid''' ) get_activation('''silu''' ) get_activation('''swish''' ) get_activation('''tanh''' ) with self.assertRaises(__SCREAMING_SNAKE_CASE ): get_activation('''bogus''' ) with self.assertRaises(__SCREAMING_SNAKE_CASE ): get_activation(__SCREAMING_SNAKE_CASE ) def _snake_case ( self ): """simple docstring""" lowercase_ : int = get_activation('''gelu''' ) lowercase_ : Any = 1 lowercase_ : str = get_activation('''gelu''' ) self.assertEqual(acta.a , 1 ) with self.assertRaises(__SCREAMING_SNAKE_CASE ): lowercase_ : Dict = acta.a
93
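The activation tests above exercise the tanh approximation of GELU and a clipped "gelu_10" variant. A hedged sketch of both formulas as standalone functions (these are not the transformers implementations, just the math the tests compare):

import math
import torch

def gelu_tanh(x):
    # tanh approximation of GELU, the "gelu_new"-style formula
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x**3)))

def gelu_clipped(x, limit=10.0):
    # clamp the GELU output into [-limit, limit]; the "gelu_10" idea
    return torch.clamp(torch.nn.functional.gelu(x), min=-limit, max=limit)

x = torch.tensor([-100.0, -1.0, 0.0, 1.0, 100.0])
print(gelu_tanh(x))
print(gelu_clipped(x))  # large inputs saturate at 10.0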
import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class a_ ( a__ , a__ , a__ , unittest.TestCase ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = StableUnCLIPImgaImgPipeline __SCREAMING_SNAKE_CASE : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS __SCREAMING_SNAKE_CASE : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __SCREAMING_SNAKE_CASE : Any = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess __SCREAMING_SNAKE_CASE : Tuple = frozenset([] ) def __lowerCAmelCase ( self ) ->Union[str, Any]: SCREAMING_SNAKE_CASE : Optional[Any] = 32 SCREAMING_SNAKE_CASE : Tuple = embedder_hidden_size # image encoding components SCREAMING_SNAKE_CASE : int = CLIPImageProcessor(crop_size=32 , size=32 ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[int] = CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=_lowerCamelCase , projection_dim=_lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) ) # regular denoising components torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : int = StableUnCLIPImageNormalizer(embedding_dim=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Tuple = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=_lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowerCamelCase , layers_per_block=1 , upcast_attention=_lowerCamelCase , use_linear_projection=_lowerCamelCase , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[int] = DDIMScheduler( beta_schedule='''scaled_linear''' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type='''v_prediction''' , set_alpha_to_one=_lowerCamelCase , steps_offset=1 , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Dict = AutoencoderKL() SCREAMING_SNAKE_CASE : 
Optional[Any] = { # image encoding components '''feature_extractor''': feature_extractor, '''image_encoder''': image_encoder.eval(), # image noising components '''image_normalizer''': image_normalizer.eval(), '''image_noising_scheduler''': image_noising_scheduler, # regular denoising components '''tokenizer''': tokenizer, '''text_encoder''': text_encoder.eval(), '''unet''': unet.eval(), '''scheduler''': scheduler, '''vae''': vae.eval(), } return components def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 , _lowerCamelCase=True ) ->Optional[int]: if str(_lowerCamelCase ).startswith('''mps''' ): SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(_lowerCamelCase ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) if pil_image: SCREAMING_SNAKE_CASE : Any = input_image * 0.5 + 0.5 SCREAMING_SNAKE_CASE : int = input_image.clamp(0 , 1 ) SCREAMING_SNAKE_CASE : Union[str, Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() SCREAMING_SNAKE_CASE : List[str] = DiffusionPipeline.numpy_to_pil(_lowerCamelCase )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def __lowerCAmelCase ( self ) ->Tuple: SCREAMING_SNAKE_CASE : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : Tuple = StableUnCLIPImgaImgPipeline(**_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = sd_pipe.to(_lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_inputs(_lowerCamelCase ) inputs.update({'''image_embeds''': None} ) SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe(**_lowerCamelCase ).images SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE : Tuple = np.array([0.3_8_7_2, 0.7_2_2_4, 0.5_6_0_1, 0.4_7_4_1, 0.6_8_7_2, 0.5_8_1_4, 0.4_6_3_6, 0.3_8_6_7, 0.5_0_7_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowerCAmelCase ( self ) ->Tuple: SCREAMING_SNAKE_CASE : str = torch_device in ['''cpu''', '''mps'''] self._test_attention_slicing_forward_pass(test_max_difference=_lowerCamelCase ) def __lowerCAmelCase ( self ) ->List[Any]: SCREAMING_SNAKE_CASE : Tuple = torch_device in ['''cpu''', '''mps'''] self._test_inference_batch_single_identical(test_max_difference=_lowerCamelCase ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __lowerCAmelCase ( self ) ->Optional[int]: self._test_xformers_attention_forwardGenerator_pass(test_max_difference=_lowerCamelCase ) @slow @require_torch_gpu class a_ ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self ) ->int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) ->Tuple: SCREAMING_SNAKE_CASE : List[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' ) SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy( 
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' ) SCREAMING_SNAKE_CASE : List[Any] = StableUnCLIPImgaImgPipeline.from_pretrained( '''fusing/stable-unclip-2-1-l-img2img''' , torch_dtype=torch.floataa ) pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 ) SCREAMING_SNAKE_CASE : Tuple = pipe(_lowerCamelCase , '''anime turle''' , generator=_lowerCamelCase , output_type='''np''' ) SCREAMING_SNAKE_CASE : List[str] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase ) def __lowerCAmelCase ( self ) ->Optional[Any]: SCREAMING_SNAKE_CASE : List[str] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' ) SCREAMING_SNAKE_CASE : Optional[int] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' ) SCREAMING_SNAKE_CASE : List[str] = StableUnCLIPImgaImgPipeline.from_pretrained( '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa ) pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 ) SCREAMING_SNAKE_CASE : Tuple = pipe(_lowerCamelCase , '''anime turle''' , generator=_lowerCamelCase , output_type='''np''' ) SCREAMING_SNAKE_CASE : List[str] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase ) def __lowerCAmelCase ( self ) ->Any: SCREAMING_SNAKE_CASE : Union[str, Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() SCREAMING_SNAKE_CASE : str = StableUnCLIPImgaImgPipeline.from_pretrained( '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : Dict = pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() SCREAMING_SNAKE_CASE : Dict = pipe( _lowerCamelCase , '''anime turtle''' , num_inference_steps=2 , output_type='''np''' , ) SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
313
0
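The slow tests above finish with an assert_mean_pixel_difference check against a reference image. A plausible sketch of such a helper under assumed semantics (average absolute difference in float space; the threshold value is a guess, not the diffusers default):

import numpy as np

def mean_pixel_difference_ok(image, expected, threshold=10.0):
    # True when the mean absolute per-pixel difference stays below threshold.
    image = np.asarray(image, dtype=np.float32)
    expected = np.asarray(expected, dtype=np.float32)
    return np.abs(image - expected).mean() < threshold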
from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = ['image_processor', 'tokenizer'] SCREAMING_SNAKE_CASE__ = 'Pix2StructImageProcessor' SCREAMING_SNAKE_CASE__ = ('T5Tokenizer', 'T5TokenizerFast') def __init__( self , _lowerCamelCase , _lowerCamelCase ): a :Optional[int] = False super().__init__(_lowerCamelCase , _lowerCamelCase ) def __call__( self , _lowerCamelCase=None , _lowerCamelCase = None , _lowerCamelCase = True , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = 2048 , _lowerCamelCase = 0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = True , _lowerCamelCase = None , **_lowerCamelCase , ): if images is None and text is None: raise ValueError('''You have to specify either images or text.''' ) # Get only text if images is None and not self.image_processor.is_vqa: a :int = self.tokenizer a :int = self.tokenizer( text=_lowerCamelCase , add_special_tokens=_lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase , max_length=_lowerCamelCase , stride=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , return_overflowing_tokens=_lowerCamelCase , return_special_tokens_mask=_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , return_length=_lowerCamelCase , verbose=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase , ) return text_encoding if not self.image_processor.is_vqa: # add pixel_values a :Optional[Any] = self.image_processor( _lowerCamelCase , return_tensors=_lowerCamelCase , max_patches=_lowerCamelCase , **_lowerCamelCase ) else: # add pixel_values and bbox a :Optional[int] = self.image_processor( _lowerCamelCase , return_tensors=_lowerCamelCase , max_patches=_lowerCamelCase , header_text=_lowerCamelCase , **_lowerCamelCase ) if text is not None and not self.image_processor.is_vqa: a :Any = self.tokenizer( text=_lowerCamelCase , add_special_tokens=_lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase , max_length=_lowerCamelCase , stride=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , return_overflowing_tokens=_lowerCamelCase , return_special_tokens_mask=_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , return_length=_lowerCamelCase , verbose=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase , ) if "attention_mask" in text_encoding: a :List[str] = text_encoding.pop('''attention_mask''' ) if "input_ids" in text_encoding: a :List[Any] = text_encoding.pop('''input_ids''' ) else: a :str = None if text_encoding is not None: encoding_image_processor.update(_lowerCamelCase ) return encoding_image_processor def SCREAMING_SNAKE_CASE__ ( self , *_lowerCamelCase , **_lowerCamelCase ): return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , *_lowerCamelCase , **_lowerCamelCase ): return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase ) @property def SCREAMING_SNAKE_CASE__ ( self ): a :Union[str, Any] = self.tokenizer.model_input_names a :Dict = 
self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
94
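The Pix2Struct processor above folds the tokenizer output into the image-processor encoding so the text features feed the decoder. A generic sketch of that merge pattern (the decoder_* key names are assumptions for illustration):

def merge_encodings(image_encoding, text_encoding):
    # Copy image features, then re-home text features under decoder-side keys.
    merged = dict(image_encoding)
    if text_encoding is not None:
        if "attention_mask" in text_encoding:
            merged["decoder_attention_mask"] = text_encoding["attention_mask"]
        if "input_ids" in text_encoding:
            merged["decoder_input_ids"] = text_encoding["input_ids"]
    return merged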
from abc import ABC, abstractmethod from typing import List, Optional class a_ ( a__ ): """simple docstring""" def __init__( self ) ->List[str]: # test for the above condition self.test() def __lowerCAmelCase ( self ) ->List[str]: SCREAMING_SNAKE_CASE : int = 0 SCREAMING_SNAKE_CASE : Union[str, Any] = False while not completed: if counter == 1: self.reset() SCREAMING_SNAKE_CASE : List[Any] = self.advance() if not self.does_advance(_lowerCamelCase ): raise Exception( '''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.update(_lowerCamelCase ) counter += 1 if counter > 1_0000: raise Exception('''update() does not fulfill the constraint.''' ) if self.remaining() != 0: raise Exception('''Custom Constraint is not defined correctly.''' ) @abstractmethod def __lowerCAmelCase ( self ) ->Optional[int]: raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) @abstractmethod def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]: raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) @abstractmethod def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]: raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) @abstractmethod def __lowerCAmelCase ( self ) ->Optional[Any]: raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) @abstractmethod def __lowerCAmelCase ( self ) ->Union[str, Any]: raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) @abstractmethod def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->Any: raise NotImplementedError( F"""{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.""" ) class a_ ( a__ ): """simple docstring""" def __init__( self , _lowerCamelCase ) ->int: super(_lowerCamelCase , self ).__init__() if not isinstance(_lowerCamelCase , _lowerCamelCase ) or len(_lowerCamelCase ) == 0: raise ValueError(F"""`token_ids` has to be a non-empty list, but is {token_ids}.""" ) if any((not isinstance(_lowerCamelCase , _lowerCamelCase ) or token_id < 0) for token_id in token_ids ): raise ValueError(F"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" ) SCREAMING_SNAKE_CASE : Optional[Any] = token_ids SCREAMING_SNAKE_CASE : Union[str, Any] = len(self.token_ids ) SCREAMING_SNAKE_CASE : Any = -1 # the index of the currently fulfilled step SCREAMING_SNAKE_CASE : Any = False def __lowerCAmelCase ( self ) ->List[Any]: if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]: if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCamelCase )}""" ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[str]: if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCamelCase )}""" ) SCREAMING_SNAKE_CASE : str = False SCREAMING_SNAKE_CASE : Any = False SCREAMING_SNAKE_CASE : List[Any] = False if self.does_advance(_lowerCamelCase ): self.fulfilled_idx += 1 SCREAMING_SNAKE_CASE : str = True if self.fulfilled_idx == (self.seqlen - 1): SCREAMING_SNAKE_CASE : Any = True SCREAMING_SNAKE_CASE : Union[str, Any] = completed else: # failed to make progress. 
SCREAMING_SNAKE_CASE : Dict = True self.reset() return stepped, completed, reset def __lowerCAmelCase ( self ) ->List[Any]: SCREAMING_SNAKE_CASE : List[Any] = False SCREAMING_SNAKE_CASE : Union[str, Any] = 0 def __lowerCAmelCase ( self ) ->Any: return self.seqlen - (self.fulfilled_idx + 1) def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->Dict: SCREAMING_SNAKE_CASE : Any = PhrasalConstraint(self.token_ids ) if stateful: SCREAMING_SNAKE_CASE : Dict = self.seqlen SCREAMING_SNAKE_CASE : int = self.fulfilled_idx SCREAMING_SNAKE_CASE : Tuple = self.completed return new_constraint class a_ : """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase=True ) ->Dict: SCREAMING_SNAKE_CASE : Any = max([len(_lowerCamelCase ) for one in nested_token_ids] ) SCREAMING_SNAKE_CASE : List[str] = {} for token_ids in nested_token_ids: SCREAMING_SNAKE_CASE : Optional[Any] = root for tidx, token_id in enumerate(_lowerCamelCase ): if token_id not in level: SCREAMING_SNAKE_CASE : Any = {} SCREAMING_SNAKE_CASE : Tuple = level[token_id] if no_subsets and self.has_subsets(_lowerCamelCase , _lowerCamelCase ): raise ValueError( '''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is''' F""" {nested_token_ids}.""" ) SCREAMING_SNAKE_CASE : List[Any] = root def __lowerCAmelCase ( self , _lowerCamelCase ) ->int: SCREAMING_SNAKE_CASE : List[Any] = self.trie for current_token in current_seq: SCREAMING_SNAKE_CASE : int = start[current_token] SCREAMING_SNAKE_CASE : Optional[int] = list(start.keys() ) return next_tokens def __lowerCAmelCase ( self , _lowerCamelCase ) ->Dict: SCREAMING_SNAKE_CASE : Any = self.next_tokens(_lowerCamelCase ) return len(_lowerCamelCase ) == 0 def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]: SCREAMING_SNAKE_CASE : Any = list(root.values() ) if len(_lowerCamelCase ) == 0: return 1 else: return sum([self.count_leaves(_lowerCamelCase ) for nn in next_nodes] ) def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict: SCREAMING_SNAKE_CASE : List[str] = self.count_leaves(_lowerCamelCase ) return len(_lowerCamelCase ) != leaf_count class a_ ( a__ ): """simple docstring""" def __init__( self , _lowerCamelCase ) ->str: super(_lowerCamelCase , self ).__init__() if not isinstance(_lowerCamelCase , _lowerCamelCase ) or len(_lowerCamelCase ) == 0: raise ValueError(F"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" ) if any(not isinstance(_lowerCamelCase , _lowerCamelCase ) for token_ids in nested_token_ids ): raise ValueError(F"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" ) if any( any((not isinstance(_lowerCamelCase , _lowerCamelCase ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( F"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" ) SCREAMING_SNAKE_CASE : List[Any] = DisjunctiveTrie(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Any = nested_token_ids SCREAMING_SNAKE_CASE : Optional[int] = self.trie.max_height SCREAMING_SNAKE_CASE : Union[str, Any] = [] SCREAMING_SNAKE_CASE : Optional[int] = False def __lowerCAmelCase ( self ) ->int: SCREAMING_SNAKE_CASE : str = self.trie.next_tokens(self.current_seq ) if len(_lowerCamelCase ) == 0: return None else: return token_list def __lowerCAmelCase ( self , _lowerCamelCase ) ->Dict: if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type 
{type(_lowerCamelCase )}""" ) SCREAMING_SNAKE_CASE : List[str] = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def __lowerCAmelCase ( self , _lowerCamelCase ) ->Any: if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_lowerCamelCase )}""" ) SCREAMING_SNAKE_CASE : int = False SCREAMING_SNAKE_CASE : List[Any] = False SCREAMING_SNAKE_CASE : Union[str, Any] = False if self.does_advance(_lowerCamelCase ): self.current_seq.append(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = True else: SCREAMING_SNAKE_CASE : Dict = True self.reset() SCREAMING_SNAKE_CASE : Any = self.trie.reached_leaf(self.current_seq ) SCREAMING_SNAKE_CASE : List[Any] = completed return stepped, completed, reset def __lowerCAmelCase ( self ) ->Optional[Any]: SCREAMING_SNAKE_CASE : Any = False SCREAMING_SNAKE_CASE : List[Any] = [] def __lowerCAmelCase ( self ) ->Optional[Any]: if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->List[str]: SCREAMING_SNAKE_CASE : str = DisjunctiveConstraint(self.token_ids ) if stateful: SCREAMING_SNAKE_CASE : str = self.seqlen SCREAMING_SNAKE_CASE : int = self.current_seq SCREAMING_SNAKE_CASE : Optional[int] = self.completed return new_constraint class a_ : """simple docstring""" def __init__( self , _lowerCamelCase ) ->Union[str, Any]: SCREAMING_SNAKE_CASE : List[Any] = constraints # max # of steps required to fulfill a given constraint SCREAMING_SNAKE_CASE : str = max([c.seqlen for c in constraints] ) SCREAMING_SNAKE_CASE : List[str] = len(_lowerCamelCase ) SCREAMING_SNAKE_CASE : int = False self.init_state() def __lowerCAmelCase ( self ) ->int: SCREAMING_SNAKE_CASE : Any = [] SCREAMING_SNAKE_CASE : List[Any] = None SCREAMING_SNAKE_CASE : Tuple = [constraint.copy(stateful=_lowerCamelCase ) for constraint in self.constraints] def __lowerCAmelCase ( self ) ->str: SCREAMING_SNAKE_CASE : str = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def __lowerCAmelCase ( self ) ->Optional[int]: SCREAMING_SNAKE_CASE : Tuple = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" SCREAMING_SNAKE_CASE : Optional[int] = constraint.advance() if isinstance(_lowerCamelCase , _lowerCamelCase ): token_list.append(_lowerCamelCase ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): token_list.extend(_lowerCamelCase ) else: SCREAMING_SNAKE_CASE : List[str] = self.inprogress_constraint.advance() if isinstance(_lowerCamelCase , _lowerCamelCase ): token_list.append(_lowerCamelCase ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): token_list.extend(_lowerCamelCase ) if len(_lowerCamelCase ) == 0: return None else: return token_list def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]: self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.add(_lowerCamelCase ) # the entire list of constraints are fulfilled if self.completed: break def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[Any]: if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise ValueError(F"""`token_id` should be an 
`int`, but is `{token_id}`.""" ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = False, False if self.completed: SCREAMING_SNAKE_CASE : List[str] = True SCREAMING_SNAKE_CASE : Optional[int] = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.inprogress_constraint.update(_lowerCamelCase ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : Optional[int] = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) SCREAMING_SNAKE_CASE : str = None if len(self.pending_constraints ) == 0: # we're done! SCREAMING_SNAKE_CASE : Optional[Any] = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(_lowerCamelCase ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = pending_constraint.update(_lowerCamelCase ) if not stepped: raise Exception( '''`constraint.update(token_id)` is not yielding incremental progress, ''' '''even though `constraint.does_advance(token_id)` is true.''' ) if complete: self.complete_constraints.append(_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = None if not complete and stepped: SCREAMING_SNAKE_CASE : Optional[Any] = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". SCREAMING_SNAKE_CASE : Union[str, Any] = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. SCREAMING_SNAKE_CASE : str = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def __lowerCAmelCase ( self , _lowerCamelCase=True ) ->str: SCREAMING_SNAKE_CASE : Dict = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: SCREAMING_SNAKE_CASE : str = [ constraint.copy(stateful=_lowerCamelCase ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: SCREAMING_SNAKE_CASE : Optional[int] = self.inprogress_constraint.copy(stateful=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = [constraint.copy() for constraint in self.pending_constraints] return new_state
313
0
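The DisjunctiveTrie above indexes several candidate token sequences so constrained generation can ask which tokens may legally come next. A minimal standalone trie sketch of the same idea:

def build_trie(nested_token_ids):
    # Nested-dict trie: each level maps token_id -> sub-trie.
    root = {}
    for token_ids in nested_token_ids:
        level = root
        for token_id in token_ids:
            level = level.setdefault(token_id, {})
    return root

def next_tokens(trie, current_seq):
    # Walk the prefix, then report the admissible continuations.
    level = trie
    for token in current_seq:
        level = level[token]
    return list(level.keys())

trie = build_trie([[1, 2, 3], [1, 2, 4], [1, 5]])
print(next_tokens(trie, [1, 2]))  # [3, 4]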
def _A ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int ): """simple docstring""" a__ : Union[str, Any] =word.split() def justify(SCREAMING_SNAKE_CASE : list , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> str: a__ : Optional[Any] =max_width - width a__ : Optional[int] =len(SCREAMING_SNAKE_CASE ) if len(SCREAMING_SNAKE_CASE ) == 1: # if there is only word in line # just insert overall_spaces_count for the remainder of line return line[0] + " " * overall_spaces_count else: a__ : List[str] =words_count - 1 # num_spaces_between_words_list[i] : tells you to insert # num_spaces_between_words_list[i] spaces # after word on line[i] a__ : Union[str, Any] =spaces_to_insert_between_words * [ overall_spaces_count // spaces_to_insert_between_words ] a__ : Union[str, Any] =( overall_spaces_count % spaces_to_insert_between_words ) # distribute spaces via round robin to the left words for i in range(SCREAMING_SNAKE_CASE ): num_spaces_between_words_list[i] += 1 a__ : Any =[] for i in range(SCREAMING_SNAKE_CASE ): # add the word aligned_words_list.append(line[i] ) # add the spaces to insert aligned_words_list.append(num_spaces_between_words_list[i] * " " ) # just add the last word to the sentence aligned_words_list.append(line[-1] ) # join the aligned words list to form a justified line return "".join(SCREAMING_SNAKE_CASE ) a__ : Optional[Any] =[] a__ : list[str] =[] a__ : Tuple =0 for word in words: if width + len(SCREAMING_SNAKE_CASE ) + len(SCREAMING_SNAKE_CASE ) <= max_width: # keep adding words until we can fill out max_width # width = sum of length of all words (without overall_spaces_count) # len(word) = length of current word # len(line) = number of overall_spaces_count to insert between words line.append(SCREAMING_SNAKE_CASE ) width += len(SCREAMING_SNAKE_CASE ) else: # justify the line and add it to result answer.append(justify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) # reset new line and new width a__ , a__ : str =[word], len(SCREAMING_SNAKE_CASE ) a__ : List[Any] =max_width - width - len(SCREAMING_SNAKE_CASE ) answer.append(" ".join(SCREAMING_SNAKE_CASE ) + (remaining_spaces + 1) * " " ) return answer if __name__ == "__main__": from doctest import testmod testmod()
95
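The row above implements the classic fixed-width text-justification problem: pack words greedily, hand out surplus spaces round-robin from the left, and left-align single-word and final lines. A self-contained sketch with a worked example:

def justify_text(sentence, max_width):
    # Greedy line packing; the final line is left-aligned and padded.
    words, lines, line, width = sentence.split(), [], [], 0
    for w in words:
        if width + len(w) + len(line) <= max_width:
            line.append(w)
            width += len(w)
        else:
            lines.append(_justify_line(line, width, max_width))
            line, width = [w], len(w)
    lines.append(" ".join(line).ljust(max_width))
    return lines

def _justify_line(line, width, max_width):
    # Distribute surplus spaces round-robin, leftmost gaps first.
    spaces = max_width - width
    if len(line) == 1:
        return line[0] + " " * spaces
    gaps = len(line) - 1
    counts = [spaces // gaps + (1 if i < spaces % gaps else 0) for i in range(gaps)]
    return "".join(w + " " * c for w, c in zip(line, counts + [0]))

print(justify_text("This is an example of text justification.", 16))
# ['This    is    an', 'example  of text', 'justification.  ']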
import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def UpperCAmelCase_( a__=32 , a__=10 , a__=100 , a__=1_026 , a__=True , a__="data/tokenized_stories_train_wikitext103.jbl" , a__="igf_context_pairs.jbl" , ): """simple docstring""" set_seed(3 ) # generate train_data and objective_set SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = generate_datasets( a__ , a__ , number=a__ , min_len=1_026 , trim=a__ ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? SCREAMING_SNAKE_CASE : str = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' ) # load pretrained model SCREAMING_SNAKE_CASE : Dict = load_gpta('''gpt2''' ).to(a__ ) print('''computing perplexity on objective set''' ) SCREAMING_SNAKE_CASE : int = compute_perplexity(a__ , a__ , a__ ).item() print('''perplexity on objective set:''' , a__ ) # collect igf pairs and save to file demo.jbl collect_objective_set(a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def UpperCAmelCase_( a__ , a__=15 , a__=128 , a__=100 , a__="igf_model.pt" , ): """simple docstring""" set_seed(42 ) # Load pre-trained model SCREAMING_SNAKE_CASE : List[Any] = GPTaLMHeadModel.from_pretrained('''gpt2''' ) # Initialize secondary learner to use embedding weights of model SCREAMING_SNAKE_CASE : str = SecondaryLearner(a__ ) # Train secondary learner SCREAMING_SNAKE_CASE : Union[str, Any] = train_secondary_learner( a__ , a__ , max_epochs=a__ , batch_size=a__ , eval_freq=100 , igf_model_path=a__ , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def UpperCAmelCase_( a__ , a__ , a__ , a__=32 , a__=1_000 , a__=16 , a__=1.0 , a__=recopy_gpta , a__=None , a__=10 , a__="gpt2_finetuned.pt" , ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' ) SCREAMING_SNAKE_CASE : Optional[int] = RandomSampler(a__ ) SCREAMING_SNAKE_CASE : Dict = DataLoader(a__ , sampler=a__ ) SCREAMING_SNAKE_CASE : Tuple = max_steps // (len(a__ )) + 1 SCREAMING_SNAKE_CASE : int = 0 SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros((1, context_len) , dtype=torch.long , device=a__ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = recopy_model(a__ , a__ , a__ ) model.train() if secondary_learner is not None: secondary_learner.to(a__ ) secondary_learner.eval() SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Optional[int] = 0 SCREAMING_SNAKE_CASE : Optional[int] = [] SCREAMING_SNAKE_CASE : Tuple = [] # Compute the performance of the transformer model at the beginning SCREAMING_SNAKE_CASE : str = compute_perplexity(a__ , a__ , a__ ) test_perps.append(a__ ) print('''Test perplexity, step''' , a__ , ''':''' , a__ ) for epoch in range(int(a__ ) ): for step, example in enumerate(a__ ): torch.cuda.empty_cache() SCREAMING_SNAKE_CASE : Union[str, Any] = random.randint(0 , example.size(2 ) - context_len - 1 ) SCREAMING_SNAKE_CASE : Optional[int] = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() 
SCREAMING_SNAKE_CASE : Optional[Any] = model(a__ , labels=a__ ) SCREAMING_SNAKE_CASE : Union[str, Any] = True if secondary_learner is not None: SCREAMING_SNAKE_CASE : List[str] = secondary_learner.forward( torch.tensor(a__ , dtype=torch.long , device=a__ ).unsqueeze(0 ) )[0].item() observed_qs.append(float(a__ ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: SCREAMING_SNAKE_CASE : Dict = -1 if predicted_q < threshold: SCREAMING_SNAKE_CASE : str = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) SCREAMING_SNAKE_CASE : List[str] = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() SCREAMING_SNAKE_CASE : Any = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: SCREAMING_SNAKE_CASE : str = compute_perplexity(a__ , a__ , a__ ) test_perps.append(a__ ) print('''Test perplexity, step''' , a__ , ''':''' , a__ ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , a__ ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def UpperCAmelCase_( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' ) # Required parameters parser.add_argument( '''--data_dir''' , default=a__ , type=a__ , required=a__ , help='''The input data dir. Should contain data files for WikiText.''' , ) parser.add_argument( '''--model_name_or_path''' , default=a__ , type=a__ , required=a__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--data_file''' , type=a__ , default=a__ , help=( '''A jbl file containing tokenized data which can be split as objective dataset, ''' '''train_dataset and test_dataset.''' ) , ) parser.add_argument( '''--igf_data_file''' , type=a__ , default=a__ , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , ) parser.add_argument( '''--output_dir''' , default=a__ , type=a__ , required=a__ , help='''The output directory where the final fine-tuned model is stored.''' , ) parser.add_argument( '''--tokenizer_name''' , default=a__ , type=a__ , help='''Pretrained tokenizer name or path if not the same as model_name''' , ) parser.add_argument('''--seed''' , type=a__ , default=a__ , help='''A seed for reproducible training.''' ) parser.add_argument( '''--context_len''' , default=32 , type=a__ , help=( '''The maximum total input sequence length after tokenization. 
Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--size_objective_set''' , default=100 , type=a__ , help='''number of articles that are long enough to be used as our objective set''' , ) parser.add_argument( '''--eval_freq''' , default=100 , type=a__ , help='''secondary model evaluation is triggered at eval_freq''' ) parser.add_argument('''--max_steps''' , default=1_000 , type=a__ , help='''To calculate training epochs''' ) parser.add_argument( '''--secondary_learner_batch_size''' , default=128 , type=a__ , help='''batch size of training data for secondary learner''' , ) parser.add_argument( '''--batch_size''' , default=16 , type=a__ , help='''batch size of training data of language model(gpt2) ''' ) parser.add_argument( '''--eval_interval''' , default=10 , type=a__ , help=( '''decay the selectivity of our secondary learner filter from''' '''1 standard deviation above average to 1 below average after 10 batches''' ) , ) parser.add_argument( '''--number''' , default=100 , type=a__ , help='''The number of examples split to be used as objective_set/test_data''' ) parser.add_argument( '''--min_len''' , default=1_026 , type=a__ , help='''The minimum length of the article to be used as objective set''' ) parser.add_argument( '''--secondary_learner_max_epochs''' , default=15 , type=a__ , help='''number of epochs to train secondary learner''' ) parser.add_argument('''--trim''' , default=a__ , type=a__ , help='''truncate the example if it exceeds context length''' ) parser.add_argument( '''--threshold''' , default=1.0 , type=a__ , help=( '''The threshold value used by secondary learner to filter the train_data and allow only''' ''' informative data as input to the model''' ) , ) parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=a__ , help='''finetuned_model_name''' ) parser.add_argument( '''--recopy_model''' , default=a__ , type=a__ , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=a__ , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , ) # Load train data for secondary learner SCREAMING_SNAKE_CASE : List[Any] = joblib.load('''data/IGF_values.jbl''' ) # Train secondary learner SCREAMING_SNAKE_CASE : Tuple = training_secondary_learner( a__ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='''igf_model.pt''' , ) # load pretrained gpt2 model SCREAMING_SNAKE_CASE : Optional[Any] = GPTaLMHeadModel.from_pretrained('''gpt2''' ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = generate_datasets( context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=100 , min_len=1_026 , trim=a__ ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( a__ , a__ , a__ , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=a__ , secondary_learner=a__ , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , ) if __name__ == "__main__": main()
313
0
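The IGF loop above gates each training context on a predicted information-gain score compared to a threshold that relaxes after a warm-up. A toy sketch of that gating logic (names and the exact schedule are illustrative, not the script's implementation):

def keep_context(predicted_q, global_step, base_threshold=1.0):
    # Selective at first, then permissive: mirrors decaying the filter
    # from roughly +1 sigma to -1 sigma after 10 batches.
    threshold = base_threshold if global_step < 10 else -1.0
    return predicted_q >= threshold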
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase__ = {"""configuration_ibert""": ["""IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """IBertConfig""", """IBertOnnxConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ """IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """IBertForMaskedLM""", """IBertForMultipleChoice""", """IBertForQuestionAnswering""", """IBertForSequenceClassification""", """IBertForTokenClassification""", """IBertModel""", """IBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ibert import ( IBERT_PRETRAINED_MODEL_ARCHIVE_LIST, IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, IBertPreTrainedModel, ) else: import sys lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
96
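The _LazyModule row above defers importing torch-heavy submodules until an attribute is first touched. A minimal PEP 562 sketch of the same idea for a plain package __init__.py (module and attribute names are placeholders):

import importlib

_import_structure = {"configuration": ["MyConfig"], "modeling": ["MyModel"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # Resolve the owning submodule lazily on first access (PEP 562).
    if name in _attr_to_module:
        module = importlib.import_module("." + _attr_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")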
import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def UpperCAmelCase_( a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = filter(lambda a__ : p.requires_grad , model.parameters() ) SCREAMING_SNAKE_CASE : List[Any] = sum([np.prod(p.size() ) for p in model_parameters] ) return params a__ : Any = logging.getLogger(__name__) def UpperCAmelCase_( a__ , a__ ): """simple docstring""" if metric == "rouge2": SCREAMING_SNAKE_CASE : str = '''{val_avg_rouge2:.4f}-{step_count}''' elif metric == "bleu": SCREAMING_SNAKE_CASE : List[Any] = '''{val_avg_bleu:.4f}-{step_count}''' elif metric == "em": SCREAMING_SNAKE_CASE : int = '''{val_avg_em:.4f}-{step_count}''' elif metric == "loss": SCREAMING_SNAKE_CASE : int = '''{val_avg_loss:.4f}-{step_count}''' else: raise NotImplementedError( F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this""" ''' function.''' ) SCREAMING_SNAKE_CASE : Dict = ModelCheckpoint( dirpath=a__ , filename=a__ , monitor=F"""val_{metric}""" , mode='''max''' , save_top_k=1 , every_n_epochs=1 , ) return checkpoint_callback def UpperCAmelCase_( a__ , a__ ): """simple docstring""" return EarlyStopping( monitor=F"""val_{metric}""" , mode='''min''' if '''loss''' in metric else '''max''' , patience=a__ , verbose=a__ , ) class a_ ( pl.Callback ): """simple docstring""" def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict: SCREAMING_SNAKE_CASE : List[str] = {F"""lr_group_{i}""": param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(_lowerCamelCase ) @rank_zero_only def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True ) ->None: logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" ) SCREAMING_SNAKE_CASE : Optional[int] = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} ) # Log results SCREAMING_SNAKE_CASE : List[str] = Path(pl_module.hparams.output_dir ) if type_path == "test": SCREAMING_SNAKE_CASE : Any = od / '''test_results.txt''' SCREAMING_SNAKE_CASE : Optional[int] = od / '''test_generations.txt''' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
SCREAMING_SNAKE_CASE : str = od / F"""{type_path}_results/{trainer.global_step:05d}.txt""" SCREAMING_SNAKE_CASE : Tuple = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt""" results_file.parent.mkdir(exist_ok=_lowerCamelCase ) generations_file.parent.mkdir(exist_ok=_lowerCamelCase ) with open(_lowerCamelCase , '''a+''' ) as writer: for key in sorted(_lowerCamelCase ): if key in ["log", "progress_bar", "preds"]: continue SCREAMING_SNAKE_CASE : Tuple = metrics[key] if isinstance(_lowerCamelCase , torch.Tensor ): SCREAMING_SNAKE_CASE : List[Any] = val.item() SCREAMING_SNAKE_CASE : Tuple = F"""{key}: {val:.6f}\n""" writer.write(_lowerCamelCase ) if not save_generations: return if "preds" in metrics: SCREAMING_SNAKE_CASE : Optional[Any] = '''\n'''.join(metrics['''preds'''] ) generations_file.open('''w+''' ).write(_lowerCamelCase ) @rank_zero_only def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict: try: SCREAMING_SNAKE_CASE : Any = pl_module.model.model.num_parameters() except AttributeError: SCREAMING_SNAKE_CASE : Optional[int] = pl_module.model.num_parameters() SCREAMING_SNAKE_CASE : int = count_trainable_parameters(_lowerCamelCase ) # mp stands for million parameters trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} ) @rank_zero_only def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->List[Any]: save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(_lowerCamelCase , _lowerCamelCase , '''test''' ) @rank_zero_only def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Tuple: save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
313
0
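The callbacks above log parameter counts in millions; the trainable-parameter tally reduces to one line of plain PyTorch:

import torch

def count_trainable_parameters(model: torch.nn.Module) -> int:
    # Sum element counts over parameters that receive gradients.
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

print(count_trainable_parameters(torch.nn.Linear(10, 4)))  # 44 = 40 weights + 4 biases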
'''simple docstring''' from math import factorial class lowercase : """simple docstring""" def __init__( self , UpperCamelCase_ , UpperCamelCase_ ): '''simple docstring''' UpperCamelCase__ :Any = real if isinstance(UpperCamelCase_ , UpperCamelCase_ ): UpperCamelCase__ :Optional[int] = [1] * rank else: UpperCamelCase__ :Union[str, Any] = rank def __repr__( self ): '''simple docstring''' return ( F'''{self.real}+''' F'''{"+".join(str(UpperCamelCase_ )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}''' ) def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :List[Any] = self.duals.copy() while cur[-1] == 0: cur.pop(-1 ) return Dual(self.real , UpperCamelCase_ ) def __add__( self , UpperCamelCase_ ): '''simple docstring''' if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): return Dual(self.real + other , self.duals ) UpperCamelCase__ :str = self.duals.copy() UpperCamelCase__ :List[Any] = other.duals.copy() if len(UpperCamelCase_ ) > len(UpperCamelCase_ ): o_dual.extend([1] * (len(UpperCamelCase_ ) - len(UpperCamelCase_ )) ) elif len(UpperCamelCase_ ) < len(UpperCamelCase_ ): s_dual.extend([1] * (len(UpperCamelCase_ ) - len(UpperCamelCase_ )) ) UpperCamelCase__ :Tuple = [] for i in range(len(UpperCamelCase_ ) ): new_duals.append(s_dual[i] + o_dual[i] ) return Dual(self.real + other.real , UpperCamelCase_ ) _a = __add__ def __sub__( self , UpperCamelCase_ ): '''simple docstring''' return self + other * -1 def __mul__( self , UpperCamelCase_ ): '''simple docstring''' if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): UpperCamelCase__ :Dict = [] for i in self.duals: new_duals.append(i * other ) return Dual(self.real * other , UpperCamelCase_ ) UpperCamelCase__ :Union[str, Any] = [0] * (len(self.duals ) + len(other.duals ) + 1) for i, item in enumerate(self.duals ): for j, jtem in enumerate(other.duals ): new_duals[i + j + 1] += item * jtem for k in range(len(self.duals ) ): new_duals[k] += self.duals[k] * other.real for index in range(len(other.duals ) ): new_duals[index] += other.duals[index] * self.real return Dual(self.real * other.real , UpperCamelCase_ ) _a = __mul__ def __truediv__( self , UpperCamelCase_ ): '''simple docstring''' if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): UpperCamelCase__ :str = [] for i in self.duals: new_duals.append(i / other ) return Dual(self.real / other , UpperCamelCase_ ) raise ValueError def __floordiv__( self , UpperCamelCase_ ): '''simple docstring''' if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): UpperCamelCase__ :Optional[Any] = [] for i in self.duals: new_duals.append(i // other ) return Dual(self.real // other , UpperCamelCase_ ) raise ValueError def __pow__( self , UpperCamelCase_ ): '''simple docstring''' if n < 0 or isinstance(UpperCamelCase_ , UpperCamelCase_ ): raise ValueError('''power must be a positive integer''' ) if n == 0: return 1 if n == 1: return self UpperCamelCase__ :List[Any] = self for _ in range(n - 1 ): x *= self return x def a ( __a , __a , __a ) -> Any: '''simple docstring''' if not callable(__a ): raise ValueError('''differentiate() requires a function as input for func''' ) if not isinstance(__a , (float, int) ): raise ValueError('''differentiate() requires a float as input for position''' ) if not isinstance(__a , __a ): raise ValueError('''differentiate() requires an int as input for order''' ) UpperCamelCase__ :Union[str, Any] = Dual(__a , 1 ) UpperCamelCase__ :List[Any] = func(__a ) if order == 0: return result.real return result.duals[order - 1] * factorial(__a ) if __name__ == 
"__main__": import doctest doctest.testmod() def a ( __a ) -> Optional[int]: '''simple docstring''' return y**2 * y**4 print(differentiate(f, 9, 2))
97
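Dual numbers give forward-mode differentiation: the n-th dual slot times n! recovers the n-th derivative. A quick hand check of the example above, where f(y) = y**2 * y**4 = y**6 and the second derivative at 9 is what differentiate should return:

from math import factorial

# f(y) = y**6, so f''(y) = 6*5 * y**4 = 30 * y**4; at y = 9 that is 196830.
print(30 * 9**4)                            # 196830
print(factorial(6) // factorial(4) * 9**4)  # same value via the falling factorial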
import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def UpperCAmelCase_( a__ ): """simple docstring""" if ( (cp >= 0x4_E00 and cp <= 0x9_FFF) or (cp >= 0x3_400 and cp <= 0x4_DBF) # or (cp >= 0x20_000 and cp <= 0x2A_6DF) # or (cp >= 0x2A_700 and cp <= 0x2B_73F) # or (cp >= 0x2B_740 and cp <= 0x2B_81F) # or (cp >= 0x2B_820 and cp <= 0x2C_EAF) # or (cp >= 0xF_900 and cp <= 0xF_AFF) or (cp >= 0x2F_800 and cp <= 0x2F_A1F) # ): # return True return False def UpperCAmelCase_( a__ ): """simple docstring""" for char in word: SCREAMING_SNAKE_CASE : str = ord(a__ ) if not _is_chinese_char(a__ ): return 0 return 1 def UpperCAmelCase_( a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = set() for token in tokens: SCREAMING_SNAKE_CASE : str = len(a__ ) > 1 and is_chinese(a__ ) if chinese_word: word_set.add(a__ ) SCREAMING_SNAKE_CASE : str = list(a__ ) return word_list def UpperCAmelCase_( a__ , a__ ): """simple docstring""" if not chinese_word_set: return bert_tokens SCREAMING_SNAKE_CASE : List[str] = max([len(a__ ) for w in chinese_word_set] ) SCREAMING_SNAKE_CASE : Tuple = bert_tokens SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = 0, len(a__ ) while start < end: SCREAMING_SNAKE_CASE : Dict = True if is_chinese(bert_word[start] ): SCREAMING_SNAKE_CASE : Optional[int] = min(end - start , a__ ) for i in range(a__ , 1 , -1 ): SCREAMING_SNAKE_CASE : Optional[int] = ''''''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): SCREAMING_SNAKE_CASE : Optional[int] = '''##''' + bert_word[j] SCREAMING_SNAKE_CASE : List[str] = start + i SCREAMING_SNAKE_CASE : Optional[Any] = False break if single_word: start += 1 return bert_word def UpperCAmelCase_( a__ , a__ , a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = [] for i in range(0 , len(a__ ) , 100 ): SCREAMING_SNAKE_CASE : Optional[Any] = ltp_tokenizer.seg(lines[i : i + 100] )[0] SCREAMING_SNAKE_CASE : Union[str, Any] = [get_chinese_word(a__ ) for r in res] ltp_res.extend(a__ ) assert len(a__ ) == len(a__ ) SCREAMING_SNAKE_CASE : Any = [] for i in range(0 , len(a__ ) , 100 ): SCREAMING_SNAKE_CASE : int = bert_tokenizer(lines[i : i + 100] , add_special_tokens=a__ , truncation=a__ , max_length=512 ) bert_res.extend(res['''input_ids'''] ) assert len(a__ ) == len(a__ ) SCREAMING_SNAKE_CASE : int = [] for input_ids, chinese_word in zip(a__ , a__ ): SCREAMING_SNAKE_CASE : List[Any] = [] for id in input_ids: SCREAMING_SNAKE_CASE : List[Any] = bert_tokenizer._convert_id_to_token(a__ ) input_tokens.append(a__ ) SCREAMING_SNAKE_CASE : List[str] = add_sub_symbol(a__ , a__ ) SCREAMING_SNAKE_CASE : Dict = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(a__ ): if token[:2] == "##": SCREAMING_SNAKE_CASE : Optional[int] = token[2:] # save chinese tokens' pos if len(a__ ) == 1 and _is_chinese_char(ord(a__ ) ): ref_id.append(a__ ) ref_ids.append(a__ ) assert len(a__ ) == len(a__ ) return ref_ids def UpperCAmelCase_( a__ ): """simple docstring""" with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f: SCREAMING_SNAKE_CASE : List[str] = f.readlines() SCREAMING_SNAKE_CASE : Union[str, Any] = [line.strip() for line in data if len(a__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' SCREAMING_SNAKE_CASE : List[str] = LTP(args.ltp ) # faster in GPU device SCREAMING_SNAKE_CASE : int = BertTokenizer.from_pretrained(args.bert ) SCREAMING_SNAKE_CASE : int = prepare_ref(a__ , a__ , a__ ) with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f: SCREAMING_SNAKE_CASE : Tuple = [json.dumps(a__ ) + '''\n''' for ref in ref_ids] f.writelines(a__ ) if __name__ == "__main__": a__ : int = argparse.ArgumentParser(description='''prepare_chinese_ref''') parser.add_argument( '''--file_name''', type=str, default='''./resources/chinese-demo.txt''', help='''file need process, same as training data in lm''', ) parser.add_argument( '''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path''' ) parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''') parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''') a__ : int = parser.parse_args() main(args)
313
0
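The script above records, per tokenized line, the positions of '##'-prefixed CJK subwords so a whole-word-masking collator can mask a word's pieces together. A tiny sketch of extracting those positions from a token list:

def subword_positions(tokens):
    # Indices of continuation pieces ("##x"); a whole-word-masking
    # collator masks these together with their head token.
    return [i for i, tok in enumerate(tokens) if tok.startswith("##")]

print(subword_positions(["[CLS]", "你", "##好", "world", "[SEP]"]))  # [2]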
"""simple docstring""" def a_ ( lowerCamelCase , lowerCamelCase ): return abs(lowerCamelCase ) if a == 0 else greatest_common_divisor(b % a , lowerCamelCase ) def a_ ( lowerCamelCase , lowerCamelCase ): while y: # --> when y=0 then loop will terminate and return x as final GCD. UpperCAmelCase__ , UpperCAmelCase__ = y, x % y return abs(lowerCamelCase ) def a_ ( ): try: UpperCAmelCase__ = input('Enter two integers separated by comma (,): ' ).split(',' ) UpperCAmelCase__ = int(nums[0] ) UpperCAmelCase__ = int(nums[1] ) print( f'''greatest_common_divisor({num_a}, {num_a}) = ''' f'''{greatest_common_divisor(lowerCamelCase , lowerCamelCase )}''' ) print(f'''By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(lowerCamelCase , lowerCamelCase )}''' ) except (IndexError, UnboundLocalError, ValueError): print('Wrong input' ) if __name__ == "__main__": main()
98
import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def UpperCAmelCase_( a__ , a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = F"""{sampling_rate}""" SCREAMING_SNAKE_CASE : Tuple = '''1''' SCREAMING_SNAKE_CASE : Union[str, Any] = '''f32le''' SCREAMING_SNAKE_CASE : List[Any] = [ '''ffmpeg''', '''-i''', '''pipe:0''', '''-ac''', ac, '''-ar''', ar, '''-f''', format_for_conversion, '''-hide_banner''', '''-loglevel''', '''quiet''', '''pipe:1''', ] try: with subprocess.Popen(a__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: SCREAMING_SNAKE_CASE : Tuple = ffmpeg_process.communicate(a__ ) except FileNotFoundError as error: raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error SCREAMING_SNAKE_CASE : Optional[Any] = output_stream[0] SCREAMING_SNAKE_CASE : Any = np.frombuffer(a__ , np.floataa ) if audio.shape[0] == 0: raise ValueError('''Malformed soundfile''' ) return audio def UpperCAmelCase_( a__ , a__ , a__ = "f32le" , ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = F"""{sampling_rate}""" SCREAMING_SNAKE_CASE : Dict = '''1''' if format_for_conversion == "s16le": SCREAMING_SNAKE_CASE : List[Any] = 2 elif format_for_conversion == "f32le": SCREAMING_SNAKE_CASE : Dict = 4 else: raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = platform.system() if system == "Linux": SCREAMING_SNAKE_CASE : Dict = '''alsa''' SCREAMING_SNAKE_CASE : Any = '''default''' elif system == "Darwin": SCREAMING_SNAKE_CASE : Union[str, Any] = '''avfoundation''' SCREAMING_SNAKE_CASE : Optional[int] = ''':0''' elif system == "Windows": SCREAMING_SNAKE_CASE : int = '''dshow''' SCREAMING_SNAKE_CASE : Any = '''default''' SCREAMING_SNAKE_CASE : Union[str, Any] = [ '''ffmpeg''', '''-f''', format_, '''-i''', input_, '''-ac''', ac, '''-ar''', ar, '''-f''', format_for_conversion, '''-fflags''', '''nobuffer''', '''-hide_banner''', '''-loglevel''', '''quiet''', '''pipe:1''', ] SCREAMING_SNAKE_CASE : List[str] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample SCREAMING_SNAKE_CASE : List[Any] = _ffmpeg_stream(a__ , a__ ) for item in iterator: yield item def UpperCAmelCase_( a__ , a__ , a__ = None , a__ = None , a__ = "f32le" , ): """simple docstring""" if stream_chunk_s is not None: SCREAMING_SNAKE_CASE : Tuple = stream_chunk_s else: SCREAMING_SNAKE_CASE : List[str] = chunk_length_s SCREAMING_SNAKE_CASE : Union[str, Any] = ffmpeg_microphone(a__ , a__ , format_for_conversion=a__ ) if format_for_conversion == "s16le": SCREAMING_SNAKE_CASE : Optional[int] = np.intaa SCREAMING_SNAKE_CASE : List[Any] = 2 elif format_for_conversion == "f32le": SCREAMING_SNAKE_CASE : Any = np.floataa SCREAMING_SNAKE_CASE : Union[str, Any] = 4 else: raise ValueError(F"""Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`""" ) if stride_length_s is None: SCREAMING_SNAKE_CASE : Optional[Any] = chunk_length_s / 6 SCREAMING_SNAKE_CASE : Dict = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(a__ , (int, float) ): SCREAMING_SNAKE_CASE : List[Any] = [stride_length_s, stride_length_s] SCREAMING_SNAKE_CASE : Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample SCREAMING_SNAKE_CASE : int = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample SCREAMING_SNAKE_CASE : Union[str, Any] = datetime.datetime.now() SCREAMING_SNAKE_CASE : Dict = datetime.timedelta(seconds=a__ ) for item in chunk_bytes_iter(a__ , a__ , stride=(stride_left, stride_right) , stream=a__ ): # Put everything back in numpy scale SCREAMING_SNAKE_CASE : Dict = np.frombuffer(item['''raw'''] , dtype=a__ ) SCREAMING_SNAKE_CASE : Optional[Any] = ( item['''stride'''][0] // size_of_sample, item['''stride'''][1] // size_of_sample, ) SCREAMING_SNAKE_CASE : Any = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! SKIP continue yield item def UpperCAmelCase_( a__ , a__ , a__ , a__ = False ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = b'''''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = stride if stride_left + stride_right >= chunk_len: raise ValueError( F"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = 0 for raw in iterator: acc += raw if stream and len(a__ ) < chunk_len: SCREAMING_SNAKE_CASE : List[str] = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(a__ ) >= chunk_len: # We are flushing the accumulator SCREAMING_SNAKE_CASE : str = (_stride_left, stride_right) SCREAMING_SNAKE_CASE : List[str] = {'''raw''': acc[:chunk_len], '''stride''': stride} if stream: SCREAMING_SNAKE_CASE : List[str] = False yield item SCREAMING_SNAKE_CASE : Dict = stride_left SCREAMING_SNAKE_CASE : int = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(a__ ) > stride_left: SCREAMING_SNAKE_CASE : Optional[Any] = {'''raw''': acc, '''stride''': (_stride_left, 0)} if stream: SCREAMING_SNAKE_CASE : Union[str, Any] = False yield item def UpperCAmelCase_( a__ , a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = 2**24 # 16Mo try: with subprocess.Popen(a__ , stdout=subprocess.PIPE , bufsize=a__ ) as ffmpeg_process: while True: SCREAMING_SNAKE_CASE : str = ffmpeg_process.stdout.read(a__ ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
313
0
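# A minimal cross-check of the two GCD implementations from the record above
# against math.gcd; values are illustrative and both functions are assumed in scope.
from math import gcd

for a, b in [(120, 84), (7, 0), (0, 9), (13, 13)]:
    assert greatest_common_divisor(a, b) == gcd_by_iterative(a, b) == gcd(a, b)
print("both GCD implementations match math.gcd")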
import pytest
from attr import dataclass


SAGEMAKER_REGION = "us-east-1"  # defaults region


@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self) -> list:
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transformers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
99
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
313
0
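# The SageMaker metric regexes above can be smoke-tested locally; the log line
# here is hypothetical, standing in for real trainer output.
import re

log_line = "train_runtime = 123.45"
match = re.search(r"train_runtime.*=\D*(.*?)$", log_line)
print(match.group(1) if match else "no match")  # -> 123.45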
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class SCREAMING_SNAKE_CASE_ ( __a , unittest.TestCase ): """simple docstring""" __lowercase : List[Any] = DanceDiffusionPipeline __lowercase : Dict = UNCONDITIONAL_AUDIO_GENERATION_PARAMS __lowercase : Optional[int] = PipelineTesterMixin.required_optional_params - { '''callback''', '''latents''', '''callback_steps''', '''output_type''', '''num_images_per_prompt''', } __lowercase : str = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS __lowercase : List[Any] = False __lowercase : str = False def snake_case_ ( self): torch.manual_seed(0) __SCREAMING_SNAKE_CASE = UNetaDModel( block_out_channels=(3_2, 3_2, 6_4) , extra_in_channels=1_6 , sample_size=5_1_2 , sample_rate=1_6_0_0_0 , in_channels=2 , out_channels=2 , flip_sin_to_cos=lowerCAmelCase__ , use_timestep_embedding=lowerCAmelCase__ , time_embedding_type="""fourier""" , mid_block_type="""UNetMidBlock1D""" , down_block_types=("""DownBlock1DNoSkip""", """DownBlock1D""", """AttnDownBlock1D""") , up_block_types=("""AttnUpBlock1D""", """UpBlock1D""", """UpBlock1DNoSkip""") , ) __SCREAMING_SNAKE_CASE = IPNDMScheduler() __SCREAMING_SNAKE_CASE = { """unet""": unet, """scheduler""": scheduler, } return components def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=0): if str(lowerCAmelCase__).startswith("""mps"""): __SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCAmelCase__) else: __SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = { """batch_size""": 1, """generator""": generator, """num_inference_steps""": 4, } return inputs def snake_case_ ( self): __SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE = self.get_dummy_components() __SCREAMING_SNAKE_CASE = DanceDiffusionPipeline(**lowerCAmelCase__) __SCREAMING_SNAKE_CASE = pipe.to(lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = pipe(**lowerCAmelCase__) __SCREAMING_SNAKE_CASE = output.audios __SCREAMING_SNAKE_CASE = audio[0, -3:, -3:] assert audio.shape == (1, 2, components["unet"].sample_size) __SCREAMING_SNAKE_CASE = np.array([-0.72_65, 1.00_00, -0.83_88, 0.11_75, 0.94_98, -1.00_00]) assert np.abs(audio_slice.flatten() - expected_slice).max() < 1E-2 @skip_mps def snake_case_ ( self): return super().test_save_load_local() @skip_mps def snake_case_ ( self): return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3) @skip_mps def snake_case_ ( self): return super().test_save_load_optional_components() @skip_mps def snake_case_ ( self): return super().test_attention_slicing_forward_pass() def snake_case_ ( self): super().test_inference_batch_single_identical(expected_max_diff=3E-3) @slow @require_torch_gpu class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" def snake_case_ ( self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case_ ( self): __SCREAMING_SNAKE_CASE = 
torch_device __SCREAMING_SNAKE_CASE = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""") __SCREAMING_SNAKE_CASE = pipe.to(lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = torch.manual_seed(0) __SCREAMING_SNAKE_CASE = pipe(generator=lowerCAmelCase__ , num_inference_steps=1_0_0 , audio_length_in_s=4.0_96) __SCREAMING_SNAKE_CASE = output.audios __SCREAMING_SNAKE_CASE = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) __SCREAMING_SNAKE_CASE = np.array([-0.01_92, -0.02_31, -0.03_18, -0.00_59, 0.00_02, -0.00_20]) assert np.abs(audio_slice.flatten() - expected_slice).max() < 1E-2 def snake_case_ ( self): __SCREAMING_SNAKE_CASE = torch_device __SCREAMING_SNAKE_CASE = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" , torch_dtype=torch.floataa) __SCREAMING_SNAKE_CASE = pipe.to(lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = torch.manual_seed(0) __SCREAMING_SNAKE_CASE = pipe(generator=lowerCAmelCase__ , num_inference_steps=1_0_0 , audio_length_in_s=4.0_96) __SCREAMING_SNAKE_CASE = output.audios __SCREAMING_SNAKE_CASE = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) __SCREAMING_SNAKE_CASE = np.array([-0.03_67, -0.04_88, -0.07_71, -0.05_25, -0.04_44, -0.03_41]) assert np.abs(audio_slice.flatten() - expected_slice).max() < 1E-2
100
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING a__ : int = logging.get_logger(__name__) a__ : Optional[Any] = { '''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''', # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class a_ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = 'deformable_detr' __SCREAMING_SNAKE_CASE : Union[str, Any] = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=3 , _lowerCamelCase=300 , _lowerCamelCase=1024 , _lowerCamelCase=6 , _lowerCamelCase=1024 , _lowerCamelCase=8 , _lowerCamelCase=6 , _lowerCamelCase=1024 , _lowerCamelCase=8 , _lowerCamelCase=0.0 , _lowerCamelCase=True , _lowerCamelCase="relu" , _lowerCamelCase=256 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1.0 , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase="sine" , _lowerCamelCase="resnet50" , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=False , _lowerCamelCase=300 , _lowerCamelCase=False , _lowerCamelCase=1 , _lowerCamelCase=5 , _lowerCamelCase=2 , _lowerCamelCase=1 , _lowerCamelCase=1 , _lowerCamelCase=5 , _lowerCamelCase=2 , _lowerCamelCase=0.1 , _lowerCamelCase=0.2_5 , _lowerCamelCase=False , **_lowerCamelCase , ) ->Optional[Any]: if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.''' ) SCREAMING_SNAKE_CASE : Dict = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): SCREAMING_SNAKE_CASE : List[Any] = backbone_config.get('''model_type''' ) SCREAMING_SNAKE_CASE : Optional[Any] = CONFIG_MAPPING[backbone_model_type] SCREAMING_SNAKE_CASE : int = config_class.from_dict(_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = use_timm_backbone SCREAMING_SNAKE_CASE : Optional[int] = backbone_config SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels SCREAMING_SNAKE_CASE : Optional[Any] = num_queries SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings SCREAMING_SNAKE_CASE : Optional[int] = d_model SCREAMING_SNAKE_CASE : str = encoder_ffn_dim SCREAMING_SNAKE_CASE : str = encoder_layers SCREAMING_SNAKE_CASE : str = encoder_attention_heads SCREAMING_SNAKE_CASE : Optional[int] = decoder_ffn_dim SCREAMING_SNAKE_CASE : int = decoder_layers SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_attention_heads SCREAMING_SNAKE_CASE : List[str] = dropout SCREAMING_SNAKE_CASE : Optional[int] = attention_dropout SCREAMING_SNAKE_CASE : str = activation_dropout SCREAMING_SNAKE_CASE : Optional[int] = activation_function SCREAMING_SNAKE_CASE : Optional[int] = init_std SCREAMING_SNAKE_CASE : List[str] = init_xavier_std SCREAMING_SNAKE_CASE : Optional[Any] = encoder_layerdrop SCREAMING_SNAKE_CASE : Union[str, Any] = auxiliary_loss SCREAMING_SNAKE_CASE : List[Any] = position_embedding_type SCREAMING_SNAKE_CASE : str = backbone SCREAMING_SNAKE_CASE : Dict = use_pretrained_backbone SCREAMING_SNAKE_CASE : Dict = dilation # deformable attributes SCREAMING_SNAKE_CASE : str = num_feature_levels SCREAMING_SNAKE_CASE : Optional[Any] = encoder_n_points SCREAMING_SNAKE_CASE : Any = decoder_n_points SCREAMING_SNAKE_CASE : str = two_stage SCREAMING_SNAKE_CASE : List[str] = two_stage_num_proposals SCREAMING_SNAKE_CASE : Dict = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher SCREAMING_SNAKE_CASE : int = class_cost SCREAMING_SNAKE_CASE : Union[str, Any] = bbox_cost SCREAMING_SNAKE_CASE : Optional[int] = giou_cost # Loss coefficients SCREAMING_SNAKE_CASE : Dict = mask_loss_coefficient SCREAMING_SNAKE_CASE : Union[str, Any] = dice_loss_coefficient SCREAMING_SNAKE_CASE : str = bbox_loss_coefficient SCREAMING_SNAKE_CASE : Tuple = giou_loss_coefficient SCREAMING_SNAKE_CASE : Optional[int] = eos_coefficient SCREAMING_SNAKE_CASE : Tuple = focal_alpha SCREAMING_SNAKE_CASE : Optional[int] = disable_custom_kernels super().__init__(is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase ) @property def __lowerCAmelCase ( self ) ->int: return self.encoder_attention_heads @property def __lowerCAmelCase ( self ) ->int: return self.d_model def __lowerCAmelCase ( self ) ->Any: SCREAMING_SNAKE_CASE : str = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: SCREAMING_SNAKE_CASE : Optional[int] = self.backbone_config.to_dict() SCREAMING_SNAKE_CASE : Any = self.__class__.model_type return output
313
0
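# A simplified sketch of the `attribute_map` aliasing used by the Deformable DETR
# config above; this is just the core idea, not the transformers implementation.
class SimpleConfig:
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads"}

    def __init__(self, d_model=256, encoder_attention_heads=8):
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads

    def __getattr__(self, name):
        # only reached when normal attribute lookup fails; redirect mapped aliases
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)


cfg = SimpleConfig(d_model=512)
print(cfg.hidden_size)  # -> 512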
import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): '''simple docstring''' # Load configuration defined in the metadata file with open(lowerCAmelCase__ ) as metadata_file: lowercase = json.load(lowerCAmelCase__ ) lowercase = LukeConfig(use_entity_aware_attention=lowerCAmelCase__ , **metadata['''model_config'''] ) # Load in the weights from the checkpoint_path lowercase = torch.load(lowerCAmelCase__ , map_location='''cpu''' )['''module'''] # Load the entity vocab file lowercase = load_original_entity_vocab(lowerCAmelCase__ ) # add an entry for [MASK2] lowercase = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 lowercase = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] ) # Add special tokens to the token vocabulary for downstream tasks lowercase = AddedToken('''<ent>''' , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) lowercase = AddedToken('''<ent2>''' , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f'Saving tokenizer to {pytorch_dump_folder_path}' ) tokenizer.save_pretrained(lowerCAmelCase__ ) with open(os.path.join(lowerCAmelCase__ , '''tokenizer_config.json''' ) , '''r''' ) as f: lowercase = json.load(lowerCAmelCase__ ) lowercase = '''MLukeTokenizer''' with open(os.path.join(lowerCAmelCase__ , '''tokenizer_config.json''' ) , '''w''' ) as f: json.dump(lowerCAmelCase__ , lowerCAmelCase__ ) with open(os.path.join(lowerCAmelCase__ , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f: json.dump(lowerCAmelCase__ , lowerCAmelCase__ ) lowercase = MLukeTokenizer.from_pretrained(lowerCAmelCase__ ) # Initialize the embeddings of the special tokens lowercase = tokenizer.convert_tokens_to_ids(['''@'''] )[0] lowercase = tokenizer.convert_tokens_to_ids(['''#'''] )[0] lowercase = state_dict['''embeddings.word_embeddings.weight'''] lowercase = word_emb[ent_init_index].unsqueeze(0 ) lowercase = word_emb[enta_init_index].unsqueeze(0 ) lowercase = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: lowercase = state_dict[bias_name] lowercase = decoder_bias[ent_init_index].unsqueeze(0 ) lowercase = decoder_bias[enta_init_index].unsqueeze(0 ) lowercase = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: lowercase = f'encoder.layer.{layer_index}.attention.self.' 
lowercase = state_dict[prefix + matrix_name] lowercase = state_dict[prefix + matrix_name] lowercase = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks lowercase = state_dict['''entity_embeddings.entity_embeddings.weight'''] lowercase = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 ) lowercase = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' lowercase = state_dict['''entity_predictions.bias'''] lowercase = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 ) lowercase = torch.cat([entity_prediction_bias, entity_mask_bias] ) lowercase = LukeForMaskedLM(config=lowerCAmelCase__ ).eval() state_dict.pop('''entity_predictions.decoder.weight''' ) state_dict.pop('''lm_head.decoder.weight''' ) state_dict.pop('''lm_head.decoder.bias''' ) lowercase = OrderedDict() for key, value in state_dict.items(): if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )): lowercase = state_dict[key] else: lowercase = state_dict[key] lowercase , lowercase = model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ ) if set(lowerCAmelCase__ ) != {"luke.embeddings.position_ids"}: raise ValueError(f'Unexpected unexpected_keys: {unexpected_keys}' ) if set(lowerCAmelCase__ ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(f'Unexpected missing_keys: {missing_keys}' ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs lowercase = MLukeTokenizer.from_pretrained(lowerCAmelCase__ , task='''entity_classification''' ) lowercase = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).''' lowercase = (0, 9) lowercase = tokenizer(lowerCAmelCase__ , entity_spans=[span] , return_tensors='''pt''' ) lowercase = model(**lowerCAmelCase__ ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base lowercase = torch.Size((1, 33, 768) ) lowercase = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base lowercase = torch.Size((1, 1, 768) ) lowercase = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is' f' {expected_shape}' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 ): raise ValueError # Verify masked word/entity prediction lowercase = MLukeTokenizer.from_pretrained(lowerCAmelCase__ ) lowercase = '''Tokyo is the capital of <mask>.''' lowercase = (24, 30) lowercase = tokenizer(lowerCAmelCase__ , entity_spans=[span] , return_tensors='''pt''' ) lowercase = model(**lowerCAmelCase__ ) lowercase = encoding['''input_ids'''][0].tolist() lowercase = 
input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) ) lowercase = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(lowerCAmelCase__ ) lowercase = outputs.entity_logits[0][0].argmax().item() lowercase = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print('''Saving PyTorch model to {}'''.format(lowerCAmelCase__ ) ) model.save_pretrained(lowerCAmelCase__ ) def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' lowercase = ['''[MASK]''', '''[PAD]''', '''[UNK]'''] lowercase = [json.loads(lowerCAmelCase__ ) for line in open(lowerCAmelCase__ )] lowercase = {} for entry in data: lowercase = entry['''id'''] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: lowercase = entity_id break lowercase = f'{language}:{entity_name}' lowercase = entity_id return new_mapping if __name__ == "__main__": lowercase__ :Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.") parser.add_argument( "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration." ) parser.add_argument( "--entity_vocab_path", default=None, type=str, help="Path to an entity_vocab.tsv file, containing the entity vocabulary.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model." ) parser.add_argument( "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted." ) lowercase__ :int = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
101
import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class a_ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = (EulerDiscreteScheduler,) __SCREAMING_SNAKE_CASE : Optional[int] = 10 def __lowerCAmelCase ( self , **_lowerCamelCase ) ->Tuple: SCREAMING_SNAKE_CASE : Optional[int] = { '''num_train_timesteps''': 1100, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', } config.update(**_lowerCamelCase ) return config def __lowerCAmelCase ( self ) ->Tuple: for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_lowerCamelCase ) def __lowerCAmelCase ( self ) ->Any: for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ): self.check_over_configs(beta_start=_lowerCamelCase , beta_end=_lowerCamelCase ) def __lowerCAmelCase ( self ) ->int: for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_lowerCamelCase ) def __lowerCAmelCase ( self ) ->Optional[Any]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_lowerCamelCase ) def __lowerCAmelCase ( self ) ->List[Any]: SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : int = self.get_scheduler_config() SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**_lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = self.dummy_model() SCREAMING_SNAKE_CASE : int = self.dummy_sample_deter * scheduler.init_noise_sigma SCREAMING_SNAKE_CASE : Any = sample.to(_lowerCamelCase ) for i, t in enumerate(scheduler.timesteps ): SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : List[str] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = output.prev_sample SCREAMING_SNAKE_CASE : List[Any] = torch.sum(torch.abs(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : List[Any] = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2 assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3 def __lowerCAmelCase ( self ) ->List[str]: SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config(prediction_type='''v_prediction''' ) SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**_lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : int = self.dummy_model() SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma SCREAMING_SNAKE_CASE : List[str] = sample.to(_lowerCamelCase ) for i, t in enumerate(scheduler.timesteps ): SCREAMING_SNAKE_CASE : str = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = output.prev_sample SCREAMING_SNAKE_CASE : str = torch.sum(torch.abs(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : int = torch.mean(torch.abs(_lowerCamelCase ) ) assert 
abs(result_sum.item() - 0.0_0_0_2 ) < 1e-2 assert abs(result_mean.item() - 2.2676e-06 ) < 1e-3 def __lowerCAmelCase ( self ) ->Tuple: SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config() SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**_lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps , device=_lowerCamelCase ) SCREAMING_SNAKE_CASE : int = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_model() SCREAMING_SNAKE_CASE : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(_lowerCamelCase ) for t in scheduler.timesteps: SCREAMING_SNAKE_CASE : Dict = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = output.prev_sample SCREAMING_SNAKE_CASE : List[Any] = torch.sum(torch.abs(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : str = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2 assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3 def __lowerCAmelCase ( self ) ->Optional[int]: SCREAMING_SNAKE_CASE : Dict = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**_lowerCamelCase , use_karras_sigmas=_lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps , device=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_model() SCREAMING_SNAKE_CASE : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() SCREAMING_SNAKE_CASE : int = sample.to(_lowerCamelCase ) for t in scheduler.timesteps: SCREAMING_SNAKE_CASE : List[Any] = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Dict = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ) SCREAMING_SNAKE_CASE : int = output.prev_sample SCREAMING_SNAKE_CASE : Optional[Any] = torch.sum(torch.abs(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : Any = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9 ) < 1e-2 assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3 ) < 1e-3
313
0
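# The MLuke conversion above grows the word-embedding matrix by reusing rows of
# existing tokens for the new <ent>/<ent2> markers; a toy-sized torch sketch
# (requires torch; the sizes here are illustrative, not the checkpoint's).
import torch

word_emb = torch.randn(10, 4)
ent_init_index, enta_init_index = 3, 7
ent_emb = word_emb[ent_init_index].unsqueeze(0)
enta_emb = word_emb[enta_init_index].unsqueeze(0)
extended = torch.cat([word_emb, ent_emb, enta_emb])
print(extended.shape)  # -> torch.Size([12, 4])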
"""simple docstring""" from pathlib import Path import fire from tqdm import tqdm def lowercase ( _snake_case : int="ro" , _snake_case : Dict="en" , _snake_case : int="wmt16" , _snake_case : List[str]=None ) ->None: """simple docstring""" try: import datasets except (ModuleNotFoundError, ImportError): raise ImportError('''run pip install datasets''' ) __snake_case : Union[str, Any] = f"""{src_lang}-{tgt_lang}""" print(f"""Converting {dataset}-{pair}""" ) __snake_case : Optional[Any] = datasets.load_dataset(_snake_case , _snake_case ) if save_dir is None: __snake_case : int = f"""{dataset}-{pair}""" __snake_case : Union[str, Any] = Path(_snake_case ) save_dir.mkdir(exist_ok=_snake_case ) for split in ds.keys(): print(f"""Splitting {split} with {ds[split].num_rows} records""" ) # to save to val.source, val.target like summary datasets __snake_case : Union[str, Any] = '''val''' if split == '''validation''' else split __snake_case : List[str] = save_dir.joinpath(f"""{fn}.source""" ) __snake_case : int = save_dir.joinpath(f"""{fn}.target""" ) __snake_case : Union[str, Any] = src_path.open('''w+''' ) __snake_case : Union[str, Any] = tgt_path.open('''w+''' ) # reader is the bottleneck so writing one record at a time doesn't slow things down for x in tqdm(ds[split] ): __snake_case : List[str] = x['''translation'''] src_fp.write(ex[src_lang] + '''\n''' ) tgt_fp.write(ex[tgt_lang] + '''\n''' ) print(f"""Saved {dataset} dataset to {save_dir}""" ) if __name__ == "__main__": fire.Fire(download_wmt_dataset)
102
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer a__ : Dict = logging.get_logger(__name__) a__ : Dict = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} a__ : str = { '''vocab_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''', }, '''merges_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''', }, } a__ : Optional[int] = { '''allenai/led-base-16384''': 16_384, } class a_ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE : Union[str, Any] = LEDTokenizer __SCREAMING_SNAKE_CASE : Optional[int] = ['input_ids', 'attention_mask'] def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="replace" , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase=False , _lowerCamelCase=True , **_lowerCamelCase , ) ->Union[str, Any]: super().__init__( _lowerCamelCase , _lowerCamelCase , tokenizer_file=_lowerCamelCase , errors=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase , **_lowerCamelCase , ) SCREAMING_SNAKE_CASE : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , _lowerCamelCase ) != add_prefix_space: SCREAMING_SNAKE_CASE : str = getattr(_lowerCamelCase , pre_tok_state.pop('''type''' ) ) SCREAMING_SNAKE_CASE : Optional[int] = add_prefix_space SCREAMING_SNAKE_CASE : str = pre_tok_class(**_lowerCamelCase ) SCREAMING_SNAKE_CASE : Any = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` SCREAMING_SNAKE_CASE : List[Any] = '''post_processor''' SCREAMING_SNAKE_CASE : int = getattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase ) if tokenizer_component_instance: SCREAMING_SNAKE_CASE : Any = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: SCREAMING_SNAKE_CASE : Optional[int] = tuple(state['''sep'''] ) if "cls" in state: SCREAMING_SNAKE_CASE : Optional[Any] = tuple(state['''cls'''] ) SCREAMING_SNAKE_CASE : Any = False if state.get('''add_prefix_space''' , _lowerCamelCase ) != add_prefix_space: SCREAMING_SNAKE_CASE : Union[str, Any] = add_prefix_space SCREAMING_SNAKE_CASE : Union[str, Any] = True if state.get('''trim_offsets''' , _lowerCamelCase ) != trim_offsets: SCREAMING_SNAKE_CASE : List[Any] = trim_offsets SCREAMING_SNAKE_CASE : Union[str, Any] = True if 
changes_to_apply: SCREAMING_SNAKE_CASE : List[str] = getattr(_lowerCamelCase , state.pop('''type''' ) ) SCREAMING_SNAKE_CASE : List[Any] = component_class(**_lowerCamelCase ) setattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def __lowerCAmelCase ( self ) ->str: if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[Any]: SCREAMING_SNAKE_CASE : str = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else value SCREAMING_SNAKE_CASE : List[Any] = value def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->BatchEncoding: SCREAMING_SNAKE_CASE : Tuple = kwargs.get('''is_split_into_words''' , _lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ '''to use it with pretokenized inputs.''' ) return super()._batch_encode_plus(*_lowerCamelCase , **_lowerCamelCase ) def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->BatchEncoding: SCREAMING_SNAKE_CASE : List[Any] = kwargs.get('''is_split_into_words''' , _lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ '''to use it with pretokenized inputs.''' ) return super()._encode_plus(*_lowerCamelCase , **_lowerCamelCase ) def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->Tuple[str]: SCREAMING_SNAKE_CASE : Any = self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase ) return tuple(_lowerCamelCase ) def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=None ) ->Any: SCREAMING_SNAKE_CASE : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->List[int]: SCREAMING_SNAKE_CASE : Any = [self.sep_token_id] SCREAMING_SNAKE_CASE : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = PaddingStrategy.DO_NOT_PAD , _lowerCamelCase = None , _lowerCamelCase = None , ) ->dict: SCREAMING_SNAKE_CASE : Tuple = super()._pad( encoded_inputs=_lowerCamelCase , max_length=_lowerCamelCase , padding_strategy=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: SCREAMING_SNAKE_CASE : Optional[Any] = '''attention_mask''' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: SCREAMING_SNAKE_CASE : int = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
SCREAMING_SNAKE_CASE : Tuple = len(encoded_inputs['''global_attention_mask'''] ) != len(_lowerCamelCase ) if needs_to_be_padded: SCREAMING_SNAKE_CASE : int = len(_lowerCamelCase ) - len(encoded_inputs['''global_attention_mask'''] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` SCREAMING_SNAKE_CASE : str = ( encoded_inputs['''global_attention_mask'''] + [-1] * difference ) elif self.padding_side == "left": SCREAMING_SNAKE_CASE : Optional[Any] = [-1] * difference + encoded_inputs[ '''global_attention_mask''' ] else: raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) ) return encoded_inputs
313
0
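# The LED tokenizer above pads `global_attention_mask` with -1 rather than 0,
# since 0 already means "local attention"; a plain-Python illustration.
global_attention_mask = [1, 0, 0]
max_length = 6
difference = max_length - len(global_attention_mask)
print(global_attention_mask + [-1] * difference)  # right padding -> [1, 0, 0, -1, -1, -1]
print([-1] * difference + global_attention_mask)  # left padding  -> [-1, -1, -1, 1, 0, 0]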
from heapq import heappop, heappush

import numpy as np


def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float, list[tuple[int, int]]]:
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
103
from __future__ import annotations

import math


def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)]

    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(top_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrixa: list, matrixb: list) -> list:
    if matrix_dimensions(matrixa)[1] != matrix_dimensions(matrixb)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrixa}\n"
            f"Matrix B: {matrixb}"
        )
        raise Exception(msg)
    dimensiona = matrix_dimensions(matrixa)
    dimensionb = matrix_dimensions(matrixb)

    if dimensiona[0] == dimensiona[1] and dimensionb[0] == dimensionb[1]:
        return [matrixa, matrixb]

    maximum = max(*dimensiona, *dimensionb)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrixa = matrixa
    new_matrixb = matrixb

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimensiona[0]:
            for _ in range(dimensiona[1], maxim):
                new_matrixa[i].append(0)
        else:
            new_matrixa.append([0] * maxim)
        if i < dimensionb[0]:
            for _ in range(dimensionb[1], maxim):
                new_matrixb[i].append(0)
        else:
            new_matrixb.append([0] * maxim)

    final_matrix = actual_strassen(new_matrixa, new_matrixb)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimensiona[0]:
            for _ in range(dimensionb[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrixa = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrixb = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrixa, matrixb))
313
0
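# A toy run of the grid Dijkstra fixed above (assumes `dijkstra` is in scope);
# 1 = walkable, 0 = blocked, so the path must detour around the middle wall.
import numpy as np

grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
distance, path = dijkstra(grid, (0, 0), (2, 0), allow_diagonal=False)
print(distance)  # -> 6.0
print(path)      # -> [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)]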
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
104
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState


warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")


class AcceleratedScheduler:
    """Wraps a learning-rate scheduler so it only steps when the wrapped optimizers actually stepped."""

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
313
0
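# Cross-checking binary_or from the record above against Python's built-in
# bitwise OR (illustrative values; assumes binary_or is in scope).
for a, b in [(25, 32), (37, 50), (0, 0)]:
    assert binary_or(a, b) == bin(a | b)
print(binary_or(25, 32))  # -> 0b111001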
"""simple docstring""" def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->list: '''simple docstring''' if n_term == "": return [] a : list = [] for temp in range(int(_lowercase ) ): series.append(F"""1/{temp + 1}""" if series else "1" ) return series if __name__ == "__main__": a : Any = input('''Enter the last number (nth term) of the Harmonic Series''') print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''') print(harmonic_series(nth_term))
105
import argparse import os from pathlib import Path from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params a__ : Optional[Any] = [ # replace left string with right string to get the relevant state_dict key (identical state dict to bart) ['''memory_attention''', '''encoder_attn'''], ['''attention''', '''attn'''], ['''/''', '''.'''], ['''.LayerNorm.gamma''', '''_layer_norm.weight'''], ['''.LayerNorm.beta''', '''_layer_norm.bias'''], ['''r.layer_''', '''r.layers.'''], ['''output_proj''', '''out_proj'''], ['''ffn.dense_1.''', '''fc2.'''], ['''ffn.dense.''', '''fc1.'''], ['''ffn_layer_norm''', '''final_layer_norm'''], ['''kernel''', '''weight'''], ['''encoder_layer_norm.''', '''encoder.layer_norm.'''], ['''decoder_layer_norm.''', '''decoder.layer_norm.'''], ['''embeddings.weights''', '''shared.weight'''], ] def UpperCAmelCase_( a__ ): """simple docstring""" for pegasus_name, hf_name in PATTERNS: SCREAMING_SNAKE_CASE : Union[str, Any] = k.replace(a__ , a__ ) return k def UpperCAmelCase_( a__ , a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = DEFAULTS.copy() cfg_kwargs.update(a__ ) SCREAMING_SNAKE_CASE : Union[str, Any] = PegasusConfig(**a__ ) SCREAMING_SNAKE_CASE : Optional[int] = PegasusForConditionalGeneration(a__ ) SCREAMING_SNAKE_CASE : Dict = torch_model.model.state_dict() SCREAMING_SNAKE_CASE : List[str] = {} for k, v in tf_weights.items(): SCREAMING_SNAKE_CASE : int = rename_state_dict_key(a__ ) if new_k not in sd: raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" ) if "dense" in k or "proj" in new_k: SCREAMING_SNAKE_CASE : Dict = v.T SCREAMING_SNAKE_CASE : Tuple = torch.tensor(a__ , dtype=sd[new_k].dtype ) assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}""" # make sure embedding.padding_idx is respected SCREAMING_SNAKE_CASE : Tuple = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] ) SCREAMING_SNAKE_CASE : int = mapping['''shared.weight'''] SCREAMING_SNAKE_CASE : Union[str, Any] = mapping['''shared.weight'''] SCREAMING_SNAKE_CASE : Optional[Any] = {k: torch.zeros_like(a__ ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping} mapping.update(**a__ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = torch_model.model.load_state_dict(a__ , strict=a__ ) SCREAMING_SNAKE_CASE : Optional[Any] = [ k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight'''] ] assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], F"""no matches found for the following tf keys {extra}""" return torch_model def UpperCAmelCase_( a__="./ckpt/aeslc/model.ckpt-32000" ): """simple docstring""" SCREAMING_SNAKE_CASE : str = tf.train.list_variables(a__ ) SCREAMING_SNAKE_CASE : str = {} SCREAMING_SNAKE_CASE : List[Any] = ['''Adafactor''', '''global_step'''] for name, shape in tqdm(a__ , desc='''converting tf checkpoint to dict''' ): SCREAMING_SNAKE_CASE : Union[str, Any] = any(pat in name for pat in ignore_name ) if skip_key: continue SCREAMING_SNAKE_CASE : Dict = tf.train.load_variable(a__ , a__ ) SCREAMING_SNAKE_CASE : Any = array return tf_weights def UpperCAmelCase_( a__ , a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = Path(a__ ).parent.name 
SCREAMING_SNAKE_CASE : Union[str, Any] = task_specific_params[F"""summarization_{dataset}"""]['''max_position_embeddings'''] SCREAMING_SNAKE_CASE : Dict = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' , model_max_length=a__ ) assert tok.model_max_length == desired_max_model_length tok.save_pretrained(a__ ) # convert model SCREAMING_SNAKE_CASE : Any = get_tf_weights_as_numpy(a__ ) SCREAMING_SNAKE_CASE : List[str] = task_specific_params[F"""summarization_{dataset}"""] if dataset == "large": SCREAMING_SNAKE_CASE : int = task_specific_params SCREAMING_SNAKE_CASE : List[str] = convert_pegasus(a__ , a__ ) torch_model.save_pretrained(a__ ) SCREAMING_SNAKE_CASE : Union[str, Any] = torch_model.state_dict() sd.pop('''model.decoder.embed_positions.weight''' ) sd.pop('''model.encoder.embed_positions.weight''' ) torch.save(a__ , Path(a__ ) / '''pytorch_model.bin''' ) if __name__ == "__main__": a__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''') parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''') a__ : List[str] = parser.parse_args() if args.save_dir is None: a__ : Any = Path(args.tf_ckpt_path).parent.name a__ : int = os.path.join('''pegasus''', dataset) convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
313
0
"""simple docstring""" import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ ): if isinstance(A_ , A_ ): lowerCAmelCase__ : List[Any] = np.full((len(A_ ), sequence_length, 2) , A_ ) else: lowerCAmelCase__ : List[str] = np.full((len(A_ ), sequence_length) , A_ ) for i, tensor in enumerate(A_ ): if padding_side == "right": if isinstance(A_ , A_ ): lowerCAmelCase__ : Any = tensor[:sequence_length] else: lowerCAmelCase__ : Optional[int] = tensor[:sequence_length] else: if isinstance(A_ , A_ ): lowerCAmelCase__ : str = tensor[:sequence_length] else: lowerCAmelCase__ : Optional[Any] = tensor[:sequence_length] return out_tensor.tolist() def __SCREAMING_SNAKE_CASE ( A_ ): lowerCAmelCase__ : Tuple = ord(A_ ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26): return True lowerCAmelCase__ : int = unicodedata.category(A_ ) if cat.startswith('''P''' ): return True return False @dataclass class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" lowercase__ = 42 lowercase__ = True lowercase__ = None lowercase__ = None lowercase__ = -100 lowercase__ = "pt" def __lowerCAmelCase ( self : Optional[Any] ,lowercase_ : Union[str, Any] ): import torch lowerCAmelCase__ : int = '''label''' if '''label''' in features[0].keys() else '''labels''' lowerCAmelCase__ : str = [feature[label_name] for feature in features] if label_name in features[0].keys() else None lowerCAmelCase__ : Tuple = self.tokenizer.pad( lowercase_ ,padding=self.padding ,max_length=self.max_length ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors='''pt''' if labels is None else None ,) if labels is None: return batch lowerCAmelCase__ : List[str] = torch.tensor(batch['''entity_ids'''] ).shape[1] lowerCAmelCase__ : Optional[Any] = self.tokenizer.padding_side if padding_side == "right": lowerCAmelCase__ : List[str] = [ list(lowercase_ ) + [self.label_pad_token_id] * (sequence_length - len(lowercase_ )) for label in labels ] else: lowerCAmelCase__ : int = [ [self.label_pad_token_id] * (sequence_length - len(lowercase_ )) + list(lowercase_ ) for label in labels ] lowerCAmelCase__ : Tuple = [feature['''ner_tags'''] for feature in features] lowerCAmelCase__ : str = padding_tensor(lowercase_ ,-1 ,lowercase_ ,lowercase_ ) lowerCAmelCase__ : Optional[int] = [feature['''original_entity_spans'''] for feature in features] lowerCAmelCase__ : Optional[Any] = padding_tensor(lowercase_ ,(-1, -1) ,lowercase_ ,lowercase_ ) lowerCAmelCase__ : List[Any] = {k: torch.tensor(lowercase_ ,dtype=torch.intaa ) for k, v in batch.items()} return batch
106
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class a_ ( a__ , unittest.TestCase ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = KandinskyImgaImgPipeline __SCREAMING_SNAKE_CASE : str = ['prompt', 'image_embeds', 'negative_image_embeds', 'image'] __SCREAMING_SNAKE_CASE : int = [ 'prompt', 'negative_prompt', 'image_embeds', 'negative_image_embeds', 'image', ] __SCREAMING_SNAKE_CASE : int = [ 'generator', 'height', 'width', 'strength', 'guidance_scale', 'negative_prompt', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] __SCREAMING_SNAKE_CASE : List[Any] = False @property def __lowerCAmelCase ( self ) ->int: return 32 @property def __lowerCAmelCase ( self ) ->List[str]: return 32 @property def __lowerCAmelCase ( self ) ->Optional[int]: return self.time_input_dim @property def __lowerCAmelCase ( self ) ->Tuple: return self.time_input_dim * 4 @property def __lowerCAmelCase ( self ) ->Optional[int]: return 100 @property def __lowerCAmelCase ( self ) ->Dict: SCREAMING_SNAKE_CASE : str = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' ) return tokenizer @property def __lowerCAmelCase ( self ) ->Tuple: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : List[Any] = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) SCREAMING_SNAKE_CASE : Dict = MultilingualCLIP(_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = text_encoder.eval() return text_encoder @property def __lowerCAmelCase ( self ) ->Union[str, Any]: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = { '''in_channels''': 4, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''text_image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''text_image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } SCREAMING_SNAKE_CASE : Tuple = UNetaDConditionModel(**_lowerCamelCase ) return model @property def __lowerCAmelCase ( self ) ->List[str]: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ 
"AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __lowerCAmelCase ( self ) ->Optional[Any]: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : List[str] = VQModel(**self.dummy_movq_kwargs ) return model def __lowerCAmelCase ( self ) ->Dict: SCREAMING_SNAKE_CASE : Any = self.dummy_text_encoder SCREAMING_SNAKE_CASE : Any = self.dummy_tokenizer SCREAMING_SNAKE_CASE : List[Any] = self.dummy_unet SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_movq SCREAMING_SNAKE_CASE : Optional[Any] = { '''num_train_timesteps''': 1000, '''beta_schedule''': '''linear''', '''beta_start''': 0.0_0_0_8_5, '''beta_end''': 0.0_1_2, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', '''thresholding''': False, } SCREAMING_SNAKE_CASE : Optional[Any] = DDIMScheduler(**_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = { '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->str: SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) SCREAMING_SNAKE_CASE : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowerCamelCase ) # create init_image SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE : str = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) ) if str(_lowerCamelCase ).startswith('''mps''' ): SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(_lowerCamelCase ) else: SCREAMING_SNAKE_CASE : str = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Any = { '''prompt''': '''horse''', '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def __lowerCAmelCase ( self ) ->Dict: SCREAMING_SNAKE_CASE : str = '''cpu''' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : List[str] = self.pipeline_class(**_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : Dict = output.images SCREAMING_SNAKE_CASE : Any = pipe( **self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0] SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) SCREAMING_SNAKE_CASE : Optional[int] = np.array( [0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got 
{image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class a_ ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self ) ->List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) ->Optional[int]: SCREAMING_SNAKE_CASE : int = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/kandinsky_img2img_frog.npy''' ) SCREAMING_SNAKE_CASE : int = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) SCREAMING_SNAKE_CASE : str = '''A red cartoon frog, 4k''' SCREAMING_SNAKE_CASE : Any = KandinskyPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[str] = KandinskyImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : str = pipeline.to(_lowerCamelCase ) pipeline.set_progress_bar_config(disable=_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_prior( _lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() SCREAMING_SNAKE_CASE : Dict = pipeline( _lowerCamelCase , image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , ) SCREAMING_SNAKE_CASE : Tuple = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
313
0
import argparse from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline if __name__ == "__main__": __lowerCAmelCase : List[str] = argparse.ArgumentParser() parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument( '--txt2img_unclip', default='kakaobrain/karlo-v1-alpha', type=str, required=False, help='The pretrained txt2img unclip.', ) __lowerCAmelCase : Optional[int] = parser.parse_args() __lowerCAmelCase : Union[str, Any] = UnCLIPPipeline.from_pretrained(args.txt2img_unclip) __lowerCAmelCase : List[Any] = CLIPImageProcessor() __lowerCAmelCase : Tuple = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14') __lowerCAmelCase : Union[str, Any] = UnCLIPImageVariationPipeline( decoder=txtaimg.decoder, text_encoder=txtaimg.text_encoder, tokenizer=txtaimg.tokenizer, text_proj=txtaimg.text_proj, feature_extractor=feature_extractor, image_encoder=image_encoder, super_res_first=txtaimg.super_res_first, super_res_last=txtaimg.super_res_last, decoder_scheduler=txtaimg.decoder_scheduler, super_res_scheduler=txtaimg.super_res_scheduler, ) imgaimg.save_pretrained(args.dump_path)
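# Illustrative invocation (hedged: the script filename and dump path are
# assumptions; --txt2img_unclip falls back to its default shown above):
#
#   python convert_unclip_txt2img_to_image_variation.py --dump_path ./karlo-image-variation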
107
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def UpperCAmelCase_( a__ , a__=False ): """simple docstring""" SCREAMING_SNAKE_CASE : int = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ('''module.cls_token''', '''vit.embeddings.cls_token'''), ('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''), ('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''), ('''module.pos_embed''', '''vit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''module.norm.weight''', '''layernorm.weight'''), ('''module.norm.bias''', '''layernorm.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" SCREAMING_SNAKE_CASE : Any = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def UpperCAmelCase_( a__ , a__ , a__=False ): """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: SCREAMING_SNAKE_CASE : Any = '''''' else: SCREAMING_SNAKE_CASE : Optional[int] = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) SCREAMING_SNAKE_CASE : Optional[Any] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" ) SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE : List[str] = in_proj_weight[ : config.hidden_size, : ] SCREAMING_SNAKE_CASE : Any = in_proj_bias[: config.hidden_size] SCREAMING_SNAKE_CASE : int = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] SCREAMING_SNAKE_CASE : List[Any] = in_proj_bias[ 
config.hidden_size : config.hidden_size * 2 ] SCREAMING_SNAKE_CASE : List[str] = in_proj_weight[ -config.hidden_size :, : ] SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias[-config.hidden_size :] def UpperCAmelCase_( a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(a__ , a__ ) def UpperCAmelCase_( a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = [ '''module.fc.fc1.weight''', '''module.fc.fc1.bias''', '''module.fc.bn1.weight''', '''module.fc.bn1.bias''', '''module.fc.bn1.running_mean''', '''module.fc.bn1.running_var''', '''module.fc.bn1.num_batches_tracked''', '''module.fc.fc2.weight''', '''module.fc.fc2.bias''', '''module.fc.bn2.weight''', '''module.fc.bn2.bias''', '''module.fc.bn2.running_mean''', '''module.fc.bn2.running_var''', '''module.fc.bn2.num_batches_tracked''', '''module.fc.fc3.weight''', '''module.fc.fc3.bias''', ] for k in ignore_keys: state_dict.pop(a__ , a__ ) def UpperCAmelCase_( a__ , a__ , a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : int = dct.pop(a__ ) SCREAMING_SNAKE_CASE : Optional[Any] = val def UpperCAmelCase_( a__ , a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = ViTMSNConfig() SCREAMING_SNAKE_CASE : Optional[int] = 1_000 SCREAMING_SNAKE_CASE : str = '''datasets/huggingface/label-files''' SCREAMING_SNAKE_CASE : List[str] = '''imagenet-1k-id2label.json''' SCREAMING_SNAKE_CASE : Union[str, Any] = json.load(open(hf_hub_download(a__ , a__ ) , '''r''' ) ) SCREAMING_SNAKE_CASE : List[Any] = {int(a__ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : str = idalabel SCREAMING_SNAKE_CASE : int = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: SCREAMING_SNAKE_CASE : Tuple = 384 SCREAMING_SNAKE_CASE : Any = 1_536 SCREAMING_SNAKE_CASE : List[str] = 6 elif "l16" in checkpoint_url: SCREAMING_SNAKE_CASE : Optional[int] = 1_024 SCREAMING_SNAKE_CASE : Optional[int] = 4_096 SCREAMING_SNAKE_CASE : Tuple = 24 SCREAMING_SNAKE_CASE : Union[str, Any] = 16 SCREAMING_SNAKE_CASE : Dict = 0.1 elif "b4" in checkpoint_url: SCREAMING_SNAKE_CASE : str = 4 elif "l7" in checkpoint_url: SCREAMING_SNAKE_CASE : Union[str, Any] = 7 SCREAMING_SNAKE_CASE : Union[str, Any] = 1_024 SCREAMING_SNAKE_CASE : List[Any] = 4_096 SCREAMING_SNAKE_CASE : List[Any] = 24 SCREAMING_SNAKE_CASE : Tuple = 16 SCREAMING_SNAKE_CASE : Union[str, Any] = 0.1 SCREAMING_SNAKE_CASE : Union[str, Any] = ViTMSNModel(a__ ) SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(a__ , map_location='''cpu''' )['''target_encoder'''] SCREAMING_SNAKE_CASE : Any = ViTImageProcessor(size=config.image_size ) remove_projection_head(a__ ) SCREAMING_SNAKE_CASE : Any = create_rename_keys(a__ , base_model=a__ ) for src, dest in rename_keys: rename_key(a__ , a__ , a__ ) read_in_q_k_v(a__ , a__ , base_model=a__ ) model.load_state_dict(a__ ) model.eval() SCREAMING_SNAKE_CASE : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg''' SCREAMING_SNAKE_CASE : Dict = Image.open(requests.get(a__ , stream=a__ ).raw ) SCREAMING_SNAKE_CASE : Optional[int] = ViTImageProcessor( size=config.image_size , image_mean=a__ , image_std=a__ ) SCREAMING_SNAKE_CASE : int = image_processor(images=a__ , return_tensors='''pt''' ) # forward pass torch.manual_seed(2 ) SCREAMING_SNAKE_CASE : Tuple = model(**a__ ) SCREAMING_SNAKE_CASE : str = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # 
https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] ) elif "b16" in checkpoint_url: SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] ) elif "l16" in checkpoint_url: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] ) elif "b4" in checkpoint_url: SCREAMING_SNAKE_CASE : str = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] ) else: SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3] , a__ , atol=1e-4 ) print(F"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(a__ ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(a__ ) if __name__ == "__main__": a__ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''', type=str, help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) a__ : Any = parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
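# Illustrative invocation of the script above (hedged: the script filename and
# output folder are assumptions; the checkpoint URL is the argparse default):
#
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small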
313
0
"""simple docstring""" import os import sys import tempfile import torch from .state import AcceleratorState from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment def a__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Tuple=() , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : Tuple="no" , SCREAMING_SNAKE_CASE : Dict="29500" ): '''simple docstring''' lowerCAmelCase : List[Any] = False lowerCAmelCase : Optional[int] = False if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ): lowerCAmelCase : int = True elif "IPython" in sys.modules: lowerCAmelCase : List[str] = "google.colab" in str(sys.modules["IPython"].get_ipython() ) try: lowerCAmelCase : str = PrecisionType(mixed_precision.lower() ) except ValueError: raise ValueError( f"""Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" ) if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" , SCREAMING_SNAKE_CASE ) is not None): # TPU launch import torch_xla.distributed.xla_multiprocessing as xmp if len(AcceleratorState._shared_state ) > 0: raise ValueError( "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside " "your training function. Restart your notebook and make sure no cells initializes an " "`Accelerator`." ) if num_processes is None: lowerCAmelCase : Optional[Any] = 8 lowerCAmelCase : Any = PrepareForLaunch(SCREAMING_SNAKE_CASE , distributed_type="TPU" ) print(f"""Launching a training on {num_processes} TPU cores.""" ) xmp.spawn(SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , nprocs=SCREAMING_SNAKE_CASE , start_method="fork" ) elif in_colab: # No need for a distributed launch otherwise as it's either CPU or one GPU. if torch.cuda.is_available(): print("Launching training on one GPU." ) else: print("Launching training on one CPU." ) function(*SCREAMING_SNAKE_CASE ) else: if num_processes is None: raise ValueError( "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." ) if num_processes > 1: # Multi-GPU launch from torch.multiprocessing import start_processes from torch.multiprocessing.spawn import ProcessRaisedException if len(AcceleratorState._shared_state ) > 0: raise ValueError( "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized " "inside your training function. Restart your notebook and make sure no cells initializes an " "`Accelerator`." ) if torch.cuda.is_initialized(): raise ValueError( "To launch a multi-GPU training from your notebook, you need to avoid running any instruction " "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA " "function." ) # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). 
with patch_environment( world_size=SCREAMING_SNAKE_CASE , master_addr="127.0.0.1" , master_port=SCREAMING_SNAKE_CASE , mixed_precision=SCREAMING_SNAKE_CASE ): lowerCAmelCase : Any = PrepareForLaunch(SCREAMING_SNAKE_CASE , distributed_type="MULTI_GPU" ) print(f"""Launching training on {num_processes} GPUs.""" ) try: start_processes(SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , nprocs=SCREAMING_SNAKE_CASE , start_method="fork" ) except ProcessRaisedException as e: if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]: raise RuntimeError( "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. " "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. " "Please review your imports and test them when running the `notebook_launcher()` to identify " "which one is problematic." ) from e else: # No need for a distributed launch otherwise as it's either CPU, GPU or MPS. if is_mps_available(): lowerCAmelCase : Tuple = "1" print("Launching training on MPS." ) elif torch.cuda.is_available(): print("Launching training on one GPU." ) else: print("Launching training on CPU." ) function(*SCREAMING_SNAKE_CASE ) def a__ ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict=() , SCREAMING_SNAKE_CASE : Any=2 ): '''simple docstring''' from torch.multiprocessing import start_processes with tempfile.NamedTemporaryFile() as tmp_file: # torch.distributed will expect a few environment variables to be here. We set the ones common to each # process here (the other ones will be set by the launcher). with patch_environment( world_size=SCREAMING_SNAKE_CASE , master_addr="127.0.0.1" , master_port="29500" , accelerate_mixed_precision="no" , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu="yes" , ): lowerCAmelCase : Optional[Any] = PrepareForLaunch(SCREAMING_SNAKE_CASE , debug=SCREAMING_SNAKE_CASE ) start_processes(SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , nprocs=SCREAMING_SNAKE_CASE , start_method="fork" )
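# A minimal sketch of calling the first launcher above from a notebook. The
# public name notebook_launcher and the training function are assumptions for
# illustration (definitions in this file are masked):
#
#   def training_loop():
#       ...  # build the Accelerator and train entirely inside this function
#
#   notebook_launcher(training_loop, args=(), num_processes=2, mixed_precision="fp16")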
108
import csv import tweepy # Twitter API credentials a__ : Union[str, Any] = '''''' a__ : List[str] = '''''' a__ : Any = '''''' a__ : List[str] = '''''' def UpperCAmelCase_( a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = tweepy.OAuthHandler(a__ , a__ ) auth.set_access_token(a__ , a__ ) SCREAMING_SNAKE_CASE : List[str] = tweepy.API(a__ ) # initialize a list to hold all the tweepy Tweets SCREAMING_SNAKE_CASE : Any = [] # make initial request for most recent tweets (200 is the maximum allowed count) SCREAMING_SNAKE_CASE : List[Any] = api.user_timeline(screen_name=a__ , count=200 ) # save most recent tweets alltweets.extend(a__ ) # save the id of the oldest tweet less one SCREAMING_SNAKE_CASE : Tuple = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(a__ ) > 0: print(F"""getting tweets before {oldest}""" ) # all subsequent requests use the max_id param to prevent duplicates SCREAMING_SNAKE_CASE : Any = api.user_timeline( screen_name=a__ , count=200 , max_id=a__ ) # save most recent tweets alltweets.extend(a__ ) # update the id of the oldest tweet less one SCREAMING_SNAKE_CASE : Dict = alltweets[-1].id - 1 print(F"""...{len(a__ )} tweets downloaded so far""" ) # transform the tweepy tweets into a 2D array that will populate the csv SCREAMING_SNAKE_CASE : Optional[Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(F"""new_{screen_name}_tweets.csv""" , '''w''' ) as f: SCREAMING_SNAKE_CASE : List[Any] = csv.writer(a__ ) writer.writerow(['''id''', '''created_at''', '''text'''] ) writer.writerows(a__ ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets('''FirePing32''')
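# Reading the dump back is symmetric with the writer above (hedged sketch; the
# filename follows the f-string pattern for the example account):
#
#   with open("new_FirePing32_tweets.csv") as f:
#       rows = list(csv.reader(f))
#   header, tweets = rows[0], rows[1:]  # header == ["id", "created_at", "text"]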
313
0
"""simple docstring""" import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' super().tearDown() gc.collect() def SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : List[str] = FlaxControlNetModel.from_pretrained( """lllyasviel/sd-controlnet-canny""" , from_pt=_SCREAMING_SNAKE_CASE , dtype=jnp.bfloataa ) UpperCAmelCase , UpperCAmelCase : Optional[int] = FlaxStableDiffusionControlNetPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , controlnet=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE , dtype=jnp.bfloataa ) UpperCAmelCase : List[str] = controlnet_params UpperCAmelCase : int = """bird""" UpperCAmelCase : Tuple = jax.device_count() UpperCAmelCase : Optional[int] = pipe.prepare_text_inputs([prompts] * num_samples ) UpperCAmelCase : Dict = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ) UpperCAmelCase : Tuple = pipe.prepare_image_inputs([canny_image] * num_samples ) UpperCAmelCase : List[Any] = jax.random.PRNGKey(0 ) UpperCAmelCase : str = jax.random.split(_SCREAMING_SNAKE_CASE , jax.device_count() ) UpperCAmelCase : int = replicate(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[Any] = shard(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Dict = shard(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : str = pipe( prompt_ids=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , params=_SCREAMING_SNAKE_CASE , prng_seed=_SCREAMING_SNAKE_CASE , num_inference_steps=50 , jit=_SCREAMING_SNAKE_CASE , ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) UpperCAmelCase : int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) UpperCAmelCase : Optional[Any] = images[0, 253:256, 253:256, -1] UpperCAmelCase : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) ) UpperCAmelCase : Union[str, Any] = jnp.array( [0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] ) print(F"output_slice: {output_slice}" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE ( self ) -> Any: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : str = FlaxControlNetModel.from_pretrained( """lllyasviel/sd-controlnet-openpose""" , from_pt=_SCREAMING_SNAKE_CASE , dtype=jnp.bfloataa ) UpperCAmelCase , UpperCAmelCase : int = FlaxStableDiffusionControlNetPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , controlnet=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE , dtype=jnp.bfloataa ) UpperCAmelCase : int = controlnet_params UpperCAmelCase : List[Any] = """Chef in the kitchen""" UpperCAmelCase : Dict = jax.device_count() UpperCAmelCase : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples ) UpperCAmelCase : int = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" ) UpperCAmelCase : List[str] = pipe.prepare_image_inputs([pose_image] * num_samples ) UpperCAmelCase : Dict = jax.random.PRNGKey(0 ) UpperCAmelCase : Any = 
jax.random.split(_SCREAMING_SNAKE_CASE , jax.device_count() ) UpperCAmelCase : Tuple = replicate(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : int = shard(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Any = shard(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : str = pipe( prompt_ids=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , params=_SCREAMING_SNAKE_CASE , prng_seed=_SCREAMING_SNAKE_CASE , num_inference_steps=50 , jit=_SCREAMING_SNAKE_CASE , ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) UpperCAmelCase : Dict = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) UpperCAmelCase : int = images[0, 253:256, 253:256, -1] UpperCAmelCase : Optional[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) UpperCAmelCase : Any = jnp.array( [[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] ) print(F"output_slice: {output_slice}" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
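# The replicate/shard calls above are the standard flax data-parallel pattern;
# a hedged standalone sketch of their semantics (shapes are illustrative):
#
#   import jax
#   import jax.numpy as jnp
#   from flax.jax_utils import replicate
#   from flax.training.common_utils import shard
#
#   n = jax.device_count()
#   batch = jnp.zeros((n * 2, 8))            # global batch
#   batch = shard(batch)                     # -> (n, 2, 8): one slice per device
#   params = replicate({"w": jnp.ones(8)})   # same params copied to every device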
109
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a__ : Optional[Any] = logging.get_logger(__name__) a__ : List[str] = { '''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''', '''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''', '''kssteven/ibert-roberta-large-mnli''': ( '''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json''' ), } class a_ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = 'ibert' def __init__( self , _lowerCamelCase=3_0522 , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=2 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1e-12 , _lowerCamelCase=1 , _lowerCamelCase=0 , _lowerCamelCase=2 , _lowerCamelCase="absolute" , _lowerCamelCase=False , _lowerCamelCase="none" , **_lowerCamelCase , ) ->Any: super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size SCREAMING_SNAKE_CASE : str = hidden_size SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads SCREAMING_SNAKE_CASE : Dict = hidden_act SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings SCREAMING_SNAKE_CASE : Tuple = type_vocab_size SCREAMING_SNAKE_CASE : Optional[int] = initializer_range SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps SCREAMING_SNAKE_CASE : str = position_embedding_type SCREAMING_SNAKE_CASE : Optional[int] = quant_mode SCREAMING_SNAKE_CASE : Dict = force_dequant class a_ ( a__ ): """simple docstring""" @property def __lowerCAmelCase ( self ) ->Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": SCREAMING_SNAKE_CASE : Dict = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
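# Hedged usage sketch: configs like the one above are built directly or loaded
# by hub id (the id matches the archive map at the top of this file; the public
# class name IBertConfig corresponds to the model_type 'ibert'):
#
#   from transformers import IBertConfig
#   config = IBertConfig.from_pretrained("kssteven/ibert-roberta-base")
#   config.quant_mode = True  # exercises the quant_mode field defined above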
313
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __a: Union[str, Any] = { '''configuration_swinv2''': ['''SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Swinv2Config'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a: str = [ '''SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Swinv2ForImageClassification''', '''Swinv2ForMaskedImageModeling''', '''Swinv2Model''', '''Swinv2PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swinva import ( SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST, SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel, SwinvaPreTrainedModel, ) else: import sys __a: Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
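# Hedged note on the _LazyModule pattern above: the heavy torch-backed module
# is only imported when one of its attributes is first accessed, e.g.:
#
#   from transformers.models.swinv2 import Swinv2Config   # cheap, config only
#   from transformers.models.swinv2 import Swinv2Model    # triggers the lazy torch import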
198
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType a__ : Any = logging.get_logger(__name__) a__ : Dict = { '''openai/imagegpt-small''': '''''', '''openai/imagegpt-medium''': '''''', '''openai/imagegpt-large''': '''''', } class a_ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = 'imagegpt' __SCREAMING_SNAKE_CASE : Optional[Any] = ['past_key_values'] __SCREAMING_SNAKE_CASE : Union[str, Any] = { 'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , _lowerCamelCase=512 + 1 , _lowerCamelCase=32 * 32 , _lowerCamelCase=512 , _lowerCamelCase=24 , _lowerCamelCase=8 , _lowerCamelCase=None , _lowerCamelCase="quick_gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=1e-5 , _lowerCamelCase=0.0_2 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=False , **_lowerCamelCase , ) ->str: SCREAMING_SNAKE_CASE : Any = vocab_size SCREAMING_SNAKE_CASE : Optional[int] = n_positions SCREAMING_SNAKE_CASE : Optional[int] = n_embd SCREAMING_SNAKE_CASE : List[Any] = n_layer SCREAMING_SNAKE_CASE : List[Any] = n_head SCREAMING_SNAKE_CASE : int = n_inner SCREAMING_SNAKE_CASE : Dict = activation_function SCREAMING_SNAKE_CASE : Union[str, Any] = resid_pdrop SCREAMING_SNAKE_CASE : Dict = embd_pdrop SCREAMING_SNAKE_CASE : List[str] = attn_pdrop SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_epsilon SCREAMING_SNAKE_CASE : Tuple = initializer_range SCREAMING_SNAKE_CASE : int = scale_attn_weights SCREAMING_SNAKE_CASE : Optional[int] = use_cache SCREAMING_SNAKE_CASE : Optional[Any] = scale_attn_by_inverse_layer_idx SCREAMING_SNAKE_CASE : str = reorder_and_upcast_attn SCREAMING_SNAKE_CASE : List[str] = tie_word_embeddings super().__init__(tie_word_embeddings=_lowerCamelCase , **_lowerCamelCase ) class a_ ( a__ ): """simple docstring""" @property def __lowerCAmelCase ( self ) ->Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''sequence'''}), ] ) def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = 3 , _lowerCamelCase = 32 , _lowerCamelCase = 32 , ) ->Mapping[str, Any]: SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_images(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Tuple = dict(preprocessor(images=_lowerCamelCase , return_tensors=_lowerCamelCase ) ) return inputs
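# Hedged usage note for the config above: ImageGPT models a 32x32 image as a
# sequence of color-cluster tokens, which explains the defaults
# vocab_size=512 + 1 (512 clusters plus a start-of-sequence token) and
# n_positions=32 * 32:
#
#   from transformers import ImageGPTConfig
#   config = ImageGPTConfig()    # defaults above: n_embd=512, n_layer=24, n_head=8
#   assert config.n_positions == 32 * 32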
313
0
'''simple docstring''' import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def UpperCAmelCase_ ( ): lowercase_ :Any = ArgumentParser( description=( "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes" ) ) # Optional arguments for the launch helper parser.add_argument("--num_cores" ,type=a__ ,default=1 ,help="Number of TPU cores to use (1 or 8)." ) # positional parser.add_argument( "training_script" ,type=a__ ,help=( "The full path to the single TPU training " "program/script to be launched in parallel, " "followed by all the arguments for the " "training script" ) ,) # rest from the training program parser.add_argument("training_script_args" ,nargs=a__ ) return parser.parse_args() def UpperCAmelCase_ ( ): lowercase_ :List[Any] = parse_args() # Import training_script as a module. lowercase_ :str = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) lowercase_ :Any = script_fpath.stem lowercase_ :List[Any] = importlib.import_module(a__ ) # Patch sys.argv lowercase_ :List[Any] = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )] xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores ) if __name__ == "__main__": main()
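# Illustrative invocation (hedged: the wrapped training script and its flags
# are assumptions; --num_cores must be 1 or 8 per the help text above):
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...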
223
from maths.prime_check import is_prime def UpperCAmelCase_( a__ ): """simple docstring""" if not isinstance(a__ , a__ ): SCREAMING_SNAKE_CASE : List[Any] = F"""Input value of [number={number}] must be an integer""" raise TypeError(a__ ) if is_prime(a__ ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
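# Worked examples for the function above (its name is masked; twin_prime is
# assumed from the module's intent). The values follow directly from the
# definition: return number + 2 when both number and number + 2 are prime,
# else -1:
#
#   twin_prime(3)  # -> 5   (3 and 5 are both prime)
#   twin_prime(5)  # -> 7
#   twin_prime(4)  # -> -1  (4 is not prime)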
313
0
"""simple docstring""" import pytest import datasets # Import fixture modules as plugins _UpperCamelCase: Tuple = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec'''] def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> List[str]: '''simple docstring''' for item in items: if any(marker in item.keywords for marker in ['integration', 'unit'] ): continue item.add_marker(pytest.mark.unit ) def lowercase__ ( _UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' ) @pytest.fixture(autouse=a__ ) def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> List[str]: '''simple docstring''' lowercase : Tuple = tmp_path_factory.getbasetemp() / '''cache''' lowercase : int = test_hf_cache_home / '''datasets''' lowercase : List[str] = test_hf_cache_home / '''metrics''' lowercase : Union[str, Any] = test_hf_cache_home / '''modules''' monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(a__ ) ) monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(a__ ) ) monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(a__ ) ) lowercase : int = test_hf_datasets_cache / '''downloads''' monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(a__ ) ) lowercase : Dict = test_hf_datasets_cache / '''downloads''' / '''extracted''' monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(a__ ) ) @pytest.fixture(autouse=a__ , scope='session' ) def lowercase__ ( ) -> List[Any]: '''simple docstring''' datasets.disable_progress_bar() @pytest.fixture(autouse=a__ ) def lowercase__ ( _UpperCAmelCase ) -> Dict: '''simple docstring''' monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , a__ ) @pytest.fixture def lowercase__ ( _UpperCAmelCase ) -> List[str]: '''simple docstring''' monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , a__ )
255
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaPriorEmbaEmbPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class a_ ( a__ , unittest.TestCase ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = KandinskyVaaControlnetImgaImgPipeline __SCREAMING_SNAKE_CASE : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint'] __SCREAMING_SNAKE_CASE : List[Any] = ['image_embeds', 'negative_image_embeds', 'image', 'hint'] __SCREAMING_SNAKE_CASE : List[str] = [ 'generator', 'height', 'width', 'strength', 'guidance_scale', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] __SCREAMING_SNAKE_CASE : List[Any] = False @property def __lowerCAmelCase ( self ) ->Optional[Any]: return 32 @property def __lowerCAmelCase ( self ) ->Optional[int]: return 32 @property def __lowerCAmelCase ( self ) ->str: return self.time_input_dim @property def __lowerCAmelCase ( self ) ->Dict: return self.time_input_dim * 4 @property def __lowerCAmelCase ( self ) ->Tuple: return 100 @property def __lowerCAmelCase ( self ) ->int: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Union[str, Any] = { '''in_channels''': 8, # Out channels is double the in channels because the model predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image_hint''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } SCREAMING_SNAKE_CASE : List[str] = UNetaDConditionModel(**_lowerCamelCase ) return model @property def __lowerCAmelCase ( self ) ->Any: return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __lowerCAmelCase ( self ) ->Tuple: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[int] = VQModel(**self.dummy_movq_kwargs ) return model def __lowerCAmelCase ( self ) ->List[str]: SCREAMING_SNAKE_CASE : str = self.dummy_unet SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_movq SCREAMING_SNAKE_CASE : List[str] = { '''num_train_timesteps''': 1000, '''beta_schedule''': '''linear''', '''beta_start''': 0.0_0_0_8_5, '''beta_end''': 0.0_1_2, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', '''thresholding''': False, } 
SCREAMING_SNAKE_CASE : str = DDIMScheduler(**_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = { '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->int: SCREAMING_SNAKE_CASE : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( _lowerCamelCase ) # create init_image SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) SCREAMING_SNAKE_CASE : int = image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE : Dict = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) ) # create hint SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) if str(_lowerCamelCase ).startswith('''mps''' ): SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(_lowerCamelCase ) else: SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Dict = { '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''hint''': hint, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def __lowerCAmelCase ( self ) ->Dict: SCREAMING_SNAKE_CASE : Optional[int] = '''cpu''' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**_lowerCamelCase ) SCREAMING_SNAKE_CASE : int = pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : str = output.images SCREAMING_SNAKE_CASE : Any = pipe( **self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0] SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) SCREAMING_SNAKE_CASE : Union[str, Any] = np.array( [0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class a_ ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self ) ->Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) ->Optional[int]: SCREAMING_SNAKE_CASE : int = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' ) SCREAMING_SNAKE_CASE : int = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) SCREAMING_SNAKE_CASE : Optional[Any] = init_image.resize((512, 512) ) 
SCREAMING_SNAKE_CASE : Dict = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/hint_image_cat.png''' ) SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(np.array(_lowerCamelCase ) ).float() / 2_5_5.0 SCREAMING_SNAKE_CASE : int = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) SCREAMING_SNAKE_CASE : List[Any] = '''A robot, 4k photo''' SCREAMING_SNAKE_CASE : List[str] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Tuple = KandinskyVaaControlnetImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : Any = pipeline.to(_lowerCamelCase ) pipeline.set_progress_bar_config(disable=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_prior( _lowerCamelCase , image=_lowerCamelCase , strength=0.8_5 , generator=_lowerCamelCase , negative_prompt='''''' , ).to_tuple() SCREAMING_SNAKE_CASE : List[str] = pipeline( image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , hint=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , ) SCREAMING_SNAKE_CASE : Any = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
313
0
"""simple docstring""" _UpperCamelCase : List[str] = ''' # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git ''' _UpperCamelCase : Any = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}] _UpperCamelCase : Union[str, Any] = { '''{processor_class}''': '''FakeProcessorClass''', '''{model_class}''': '''FakeModelClass''', '''{object_class}''': '''FakeObjectClass''', }
220
from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker a__ : List[str] = '''CompVis/stable-diffusion-v1-1''' a__ : Optional[Any] = '''CompVis/stable-diffusion-v1-2''' a__ : Any = '''CompVis/stable-diffusion-v1-3''' a__ : Optional[Any] = '''CompVis/stable-diffusion-v1-4''' class a_ ( a__ ): """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = True , ) ->str: super().__init__() SCREAMING_SNAKE_CASE : Tuple = StableDiffusionPipeline.from_pretrained(_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionPipeline.from_pretrained(_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = StableDiffusionPipeline.from_pretrained(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Dict = StableDiffusionPipeline( vae=_lowerCamelCase , text_encoder=_lowerCamelCase , tokenizer=_lowerCamelCase , unet=_lowerCamelCase , scheduler=_lowerCamelCase , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , requires_safety_checker=_lowerCamelCase , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def __lowerCAmelCase ( self ) ->Dict[str, Any]: return {k: getattr(self , _lowerCamelCase ) for k in self.config.keys() if not k.startswith('''_''' )} def __lowerCAmelCase ( self , _lowerCamelCase = "auto" ) ->Optional[int]: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory SCREAMING_SNAKE_CASE : Optional[int] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(_lowerCamelCase ) def __lowerCAmelCase ( self ) ->List[str]: self.enable_attention_slicing(_lowerCamelCase ) @torch.no_grad() def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->str: return self.pipea( prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) @torch.no_grad() def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Tuple: return self.pipea( prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , 
num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) @torch.no_grad() def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Dict: return self.pipea( prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) @torch.no_grad() def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Optional[Any]: return self.pipea( prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) @torch.no_grad() def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Dict: SCREAMING_SNAKE_CASE : Optional[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu''' self.to(_lowerCamelCase ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" ) # Get first result from Stable Diffusion Checkpoint v1.1 SCREAMING_SNAKE_CASE : Optional[Any] = self.textaimg_sda_a( prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) # Get first result from Stable Diffusion Checkpoint v1.2 SCREAMING_SNAKE_CASE : Any = self.textaimg_sda_a( prompt=_lowerCamelCase , height=_lowerCamelCase , 
width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) # Get first result from Stable Diffusion Checkpoint v1.3 SCREAMING_SNAKE_CASE : Optional[int] = self.textaimg_sda_a( prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) # Get first result from Stable Diffusion Checkpoint v1.4 SCREAMING_SNAKE_CASE : str = self.textaimg_sda_a( prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
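The pipeline above fans one prompt out to four Stable Diffusion v1.x checkpoints and returns all four results. A minimal sketch of the same wrapper pattern with two checkpoints; the class, parameter, and registered-module names below are illustrative assumptions, not the original file's API.

import torch
from diffusers import DiffusionPipeline, StableDiffusionPipeline
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput


class TwoCheckpointComparisonPipeline(DiffusionPipeline):
    """Run one prompt through two Stable Diffusion checkpoints and collect both results."""

    def __init__(self, pipe_a: StableDiffusionPipeline, pipe_b: StableDiffusionPipeline):
        super().__init__()  # the dunder __init__ is required; a bare _init_() would raise AttributeError
        # register_modules stores the sub-pipelines as attributes and config entries
        self.register_modules(pipeline_a=pipe_a, pipeline_b=pipe_b)

    @torch.no_grad()
    def __call__(self, prompt: str, **kwargs) -> StableDiffusionPipelineOutput:
        images_a = self.pipeline_a(prompt, **kwargs).images
        images_b = self.pipeline_b(prompt, **kwargs).images
        return StableDiffusionPipelineOutput(images=images_a + images_b, nsfw_content_detected=None)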
313
0
'''simple docstring''' import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) _lowerCAmelCase = logging.getLogger() def _SCREAMING_SNAKE_CASE ( ): """simple docstring""" lowerCAmelCase__ : str = argparse.ArgumentParser() parser.add_argument("""-f""" ) lowerCAmelCase__ : Dict = parser.parse_args() return args.f class lowerCAmelCase_( a__ ): '''simple docstring''' def UpperCAmelCase_ ( self ) -> None: lowerCAmelCase__ : Dict = logging.StreamHandler(sys.stdout ) logger.addHandler(_lowerCamelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Any: lowerCAmelCase__ : List[str] = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0 ,"""run_glue_deebert.py""" ) with patch.object(_lowerCamelCase ,"""argv""" ,_lowerCamelCase ): lowerCAmelCase__ : Any = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(_lowerCamelCase ,0.6_6_6 ) @slow @require_torch_non_multi_gpu def UpperCAmelCase_ ( self ) -> Union[str, Any]: lowerCAmelCase__ : Optional[int] = ''' --model_type roberta --model_name_or_path roberta-base --task_name MRPC --do_train --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --max_seq_length 128 --per_gpu_eval_batch_size=1 --per_gpu_train_batch_size=8 --learning_rate 2e-4 --num_train_epochs 3 --overwrite_output_dir --seed 42 --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --save_steps 0 --overwrite_cache --eval_after_first_stage '''.split() self.run_and_check(_lowerCamelCase ) lowerCAmelCase__ : Optional[int] = ''' --model_type roberta --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --task_name MRPC --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --max_seq_length 128 --eval_each_highway --eval_highway --overwrite_cache --per_gpu_eval_batch_size=1 '''.split() self.run_and_check(_lowerCamelCase ) lowerCAmelCase__ : str = ''' --model_type roberta --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --task_name MRPC --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --max_seq_length 128 --early_exit_entropy 0.1 --eval_highway --overwrite_cache --per_gpu_eval_batch_size=1 '''.split() self.run_and_check(_lowerCamelCase )
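The test above drives run_glue_deebert.main() in-process by patching sys.argv instead of spawning a subprocess. A minimal, generic sketch of that pattern; the helper name is assumed.

import sys
from unittest.mock import patch


def run_main_with_argv(main_fn, args):
    """Call a script's main() with a synthetic command line, in-process."""
    argv = ["prog.py"] + list(args)  # argv[0] is conventionally the program name
    with patch.object(sys, "argv", argv):
        return main_fn()

# e.g. metrics = run_main_with_argv(run_glue_deebert.main, ["--model_type", "roberta", ...])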
37
from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxCrossAttnUpBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, FlaxUpBlockaD, ) @flax.struct.dataclass class a_ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE : jnp.ndarray @flax_register_to_config class a_ ( nn.Module , a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = 32 __SCREAMING_SNAKE_CASE : int = 4 __SCREAMING_SNAKE_CASE : int = 4 __SCREAMING_SNAKE_CASE : Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) __SCREAMING_SNAKE_CASE : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") __SCREAMING_SNAKE_CASE : Union[bool, Tuple[bool]] = False __SCREAMING_SNAKE_CASE : Tuple[int] = (320, 640, 1280, 1280) __SCREAMING_SNAKE_CASE : int = 2 __SCREAMING_SNAKE_CASE : Union[int, Tuple[int]] = 8 __SCREAMING_SNAKE_CASE : Optional[Union[int, Tuple[int]]] = None __SCREAMING_SNAKE_CASE : int = 1280 __SCREAMING_SNAKE_CASE : float = 0.0 __SCREAMING_SNAKE_CASE : bool = False __SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa __SCREAMING_SNAKE_CASE : bool = True __SCREAMING_SNAKE_CASE : int = 0 __SCREAMING_SNAKE_CASE : bool = False def __lowerCAmelCase ( self , _lowerCamelCase ) ->FrozenDict: # init input tensors SCREAMING_SNAKE_CASE : List[Any] = (1, self.in_channels, self.sample_size, self.sample_size) SCREAMING_SNAKE_CASE : List[Any] = jnp.zeros(_lowerCamelCase , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE : Tuple = jnp.ones((1,) , dtype=jnp.intaa ) SCREAMING_SNAKE_CASE : List[Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = jax.random.split(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Tuple = {'''params''': params_rng, '''dropout''': dropout_rng} return self.init(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )["params"] def __lowerCAmelCase ( self ) ->int: SCREAMING_SNAKE_CASE : List[str] = self.block_out_channels SCREAMING_SNAKE_CASE : Optional[int] = block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( '''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. 
SCREAMING_SNAKE_CASE : List[str] = self.num_attention_heads or self.attention_head_dim # input SCREAMING_SNAKE_CASE : Optional[int] = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time SCREAMING_SNAKE_CASE : Tuple = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) SCREAMING_SNAKE_CASE : Dict = FlaxTimestepEmbedding(_lowerCamelCase , dtype=self.dtype ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.only_cross_attention if isinstance(_lowerCamelCase , _lowerCamelCase ): SCREAMING_SNAKE_CASE : str = (only_cross_attention,) * len(self.down_block_types ) if isinstance(_lowerCamelCase , _lowerCamelCase ): SCREAMING_SNAKE_CASE : str = (num_attention_heads,) * len(self.down_block_types ) # down SCREAMING_SNAKE_CASE : Optional[int] = [] SCREAMING_SNAKE_CASE : Optional[Any] = block_out_channels[0] for i, down_block_type in enumerate(self.down_block_types ): SCREAMING_SNAKE_CASE : str = output_channel SCREAMING_SNAKE_CASE : int = block_out_channels[i] SCREAMING_SNAKE_CASE : List[Any] = i == len(_lowerCamelCase ) - 1 if down_block_type == "CrossAttnDownBlock2D": SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxCrossAttnDownBlockaD( in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxDownBlockaD( in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = down_blocks # mid SCREAMING_SNAKE_CASE : int = FlaxUNetMidBlockaDCrossAttn( in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) # up SCREAMING_SNAKE_CASE : str = [] SCREAMING_SNAKE_CASE : str = list(reversed(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : int = list(reversed(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : int = list(reversed(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : List[str] = reversed_block_out_channels[0] for i, up_block_type in enumerate(self.up_block_types ): SCREAMING_SNAKE_CASE : Dict = output_channel SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i] SCREAMING_SNAKE_CASE : Tuple = reversed_block_out_channels[min(i + 1 , len(_lowerCamelCase ) - 1 )] SCREAMING_SNAKE_CASE : Dict = i == len(_lowerCamelCase ) - 1 if up_block_type == "CrossAttnUpBlock2D": SCREAMING_SNAKE_CASE : str = FlaxCrossAttnUpBlockaD( in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: SCREAMING_SNAKE_CASE : Optional[int] = FlaxUpBlockaD( in_channels=_lowerCamelCase , 
out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , ) up_blocks.append(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Dict = output_channel SCREAMING_SNAKE_CASE : Tuple = up_blocks # out SCREAMING_SNAKE_CASE : Any = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) SCREAMING_SNAKE_CASE : Any = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase = True , _lowerCamelCase = False , ) ->Union[FlaxUNetaDConditionOutput, Tuple]: # 1. time if not isinstance(_lowerCamelCase , jnp.ndarray ): SCREAMING_SNAKE_CASE : int = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(_lowerCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0: SCREAMING_SNAKE_CASE : List[str] = timesteps.astype(dtype=jnp.floataa ) SCREAMING_SNAKE_CASE : Optional[Any] = jnp.expand_dims(_lowerCamelCase , 0 ) SCREAMING_SNAKE_CASE : List[str] = self.time_proj(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = self.time_embedding(_lowerCamelCase ) # 2. pre-process SCREAMING_SNAKE_CASE : int = jnp.transpose(_lowerCamelCase , (0, 2, 3, 1) ) SCREAMING_SNAKE_CASE : List[Any] = self.conv_in(_lowerCamelCase ) # 3. down SCREAMING_SNAKE_CASE : Optional[int] = (sample,) for down_block in self.down_blocks: if isinstance(_lowerCamelCase , _lowerCamelCase ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = down_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train ) else: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = down_block(_lowerCamelCase , _lowerCamelCase , deterministic=not train ) down_block_res_samples += res_samples if down_block_additional_residuals is not None: SCREAMING_SNAKE_CASE : int = () for down_block_res_sample, down_block_additional_residual in zip( _lowerCamelCase , _lowerCamelCase ): down_block_res_sample += down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) SCREAMING_SNAKE_CASE : Dict = new_down_block_res_samples # 4. mid SCREAMING_SNAKE_CASE : Optional[Any] = self.mid_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train ) if mid_block_additional_residual is not None: sample += mid_block_additional_residual # 5. up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE : Optional[Any] = down_block_res_samples[-(self.layers_per_block + 1) :] SCREAMING_SNAKE_CASE : Optional[int] = down_block_res_samples[: -(self.layers_per_block + 1)] if isinstance(_lowerCamelCase , _lowerCamelCase ): SCREAMING_SNAKE_CASE : str = up_block( _lowerCamelCase , temb=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train , ) else: SCREAMING_SNAKE_CASE : Optional[int] = up_block(_lowerCamelCase , temb=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train ) # 6. post-process SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = nn.silu(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Dict = self.conv_out(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = jnp.transpose(_lowerCamelCase , (0, 3, 1, 2) ) if not return_dict: return (sample,) return FlaxUNetaDConditionOutput(sample=_lowerCamelCase )
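A toy Flax module, as a sketch of the conventions the UNet above relies on: hyperparameters as dataclass fields, submodules built in setup(), NHWC inputs (hence the transposes in __call__ above), and parameter initialization from a PRNG key. All names here are illustrative.

import jax
import jax.numpy as jnp
import flax.linen as nn


class TinyConvBlock(nn.Module):
    features: int = 32  # hyperparameters are dataclass fields, as in the UNet config

    def setup(self):
        # padding takes explicit (before, after) pairs per spatial dim, as above
        self.conv_in = nn.Conv(self.features, kernel_size=(3, 3), padding=((1, 1), (1, 1)))
        self.conv_out = nn.Conv(4, kernel_size=(3, 3), padding=((1, 1), (1, 1)))

    def __call__(self, x):
        return self.conv_out(nn.silu(self.conv_in(x)))


# Flax convolutions expect NHWC, which is why the UNet transposes its NCHW samples.
sample = jnp.zeros((1, 32, 32, 4), dtype=jnp.float32)
params = TinyConvBlock().init(jax.random.PRNGKey(0), sample)["params"]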
313
0
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCamelCase__ = { '''vocab_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''', }, '''merges_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''', }, } lowerCamelCase__ = { '''allenai/led-base-16384''': 1_6384, } class A__ ( a__ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = LEDTokenizer lowercase = ['input_ids', 'attention_mask'] def __init__( self : List[str] , a : List[str]=None , a : Optional[Any]=None , a : List[Any]=None , a : Optional[Any]="replace" , a : List[str]="<s>" , a : Optional[Any]="</s>" , a : int="</s>" , a : List[str]="<s>" , a : str="<unk>" , a : Optional[Any]="<pad>" , a : Optional[Any]="<mask>" , a : Optional[Any]=False , a : List[Any]=True , **a : str , ): '''simple docstring''' super().__init__( _lowerCamelCase , _lowerCamelCase , tokenizer_file=_lowerCamelCase , errors=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase , **_lowerCamelCase , ) lowerCAmelCase__ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , _lowerCamelCase ) != add_prefix_space: lowerCAmelCase__ : str = getattr(_lowerCamelCase , pre_tok_state.pop('type' ) ) lowerCAmelCase__ : Optional[int] = add_prefix_space lowerCAmelCase__ : str = pre_tok_class(**_lowerCamelCase ) lowerCAmelCase__ : Any = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` lowerCAmelCase__ : List[Any] = '''post_processor''' lowerCAmelCase__ : int = getattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase ) if tokenizer_component_instance: lowerCAmelCase__ : Any = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowerCAmelCase__ : Optional[int] = tuple(state['sep'] ) if "cls" in state: lowerCAmelCase__ : Optional[Any] = tuple(state['cls'] ) lowerCAmelCase__ : Any = False if state.get('add_prefix_space' , _lowerCamelCase ) != add_prefix_space: lowerCAmelCase__ : Union[str, Any] = add_prefix_space lowerCAmelCase__ : Union[str, Any] = True if state.get('trim_offsets' , _lowerCamelCase ) != trim_offsets: lowerCAmelCase__ : List[Any] = trim_offsets lowerCAmelCase__ : Union[str, Any] = True if changes_to_apply: lowerCAmelCase__ : List[str] = getattr(_lowerCamelCase , state.pop('type' ) ) lowerCAmelCase__ : List[Any] = component_class(**_lowerCamelCase ) setattr(self.backend_tokenizer , _lowerCamelCase , 
_lowerCamelCase ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def _lowerCamelCase ( self : int ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def _lowerCamelCase ( self : Dict , a : Tuple ): '''simple docstring''' lowerCAmelCase__ : str = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else value lowerCAmelCase__ : List[Any] = value def _lowerCamelCase ( self : Dict , *a : Optional[int] , **a : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : Tuple = kwargs.get('is_split_into_words' , _lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' 'to use it with pretokenized inputs.' ) return super()._batch_encode_plus(*_lowerCamelCase , **_lowerCamelCase ) def _lowerCamelCase ( self : Optional[Any] , *a : List[Any] , **a : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ : List[Any] = kwargs.get('is_split_into_words' , _lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' 'to use it with pretokenized inputs.' ) return super()._encode_plus(*_lowerCamelCase , **_lowerCamelCase ) def _lowerCamelCase ( self : Any , a : Union[str, Any] , a : int = None ): '''simple docstring''' lowerCAmelCase__ : Any = self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase ) return tuple(_lowerCamelCase ) def _lowerCamelCase ( self : str , a : Union[str, Any] , a : List[str]=None ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _lowerCamelCase ( self : Union[str, Any] , a : Optional[int] , a : Dict = None ): '''simple docstring''' lowerCAmelCase__ : Any = [self.sep_token_id] lowerCAmelCase__ : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowerCamelCase ( self : Optional[Any] , a : List[str] , a : int = None , a : Union[str, Any] = PaddingStrategy.DO_NOT_PAD , a : Tuple = None , a : int = None , ): '''simple docstring''' lowerCAmelCase__ : Tuple = super()._pad( encoded_inputs=_lowerCamelCase , max_length=_lowerCamelCase , padding_strategy=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: lowerCAmelCase__ : Optional[Any] = '''attention_mask''' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: lowerCAmelCase__ : int = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
lowerCAmelCase__ : Tuple = len(encoded_inputs['global_attention_mask'] ) != len(_lowerCamelCase ) if needs_to_be_padded: lowerCAmelCase__ : int = len(_lowerCamelCase ) - len(encoded_inputs['global_attention_mask'] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` lowerCAmelCase__ : str = ( encoded_inputs['''global_attention_mask'''] + [-1] * difference ) elif self.padding_side == "left": lowerCAmelCase__ : Optional[Any] = [-1] * difference + encoded_inputs[ '''global_attention_mask''' ] else: raise ValueError('Invalid padding strategy:' + str(self.padding_side ) ) return encoded_inputs
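The _pad override above keeps global_attention_mask aligned with the padded input_ids, padding with -1 because -1 marks local attention rather than a masked-out position. The same rule as a standalone sketch; the helper name is assumed.

def pad_global_attention_mask(input_ids, global_attention_mask, padding_side="right"):
    """Pad `global_attention_mask` with -1 (local attention) to the length of `input_ids`."""
    difference = len(input_ids) - len(global_attention_mask)
    if difference <= 0:
        return global_attention_mask
    if padding_side == "right":
        return global_attention_mask + [-1] * difference
    if padding_side == "left":
        return [-1] * difference + global_attention_mask
    raise ValueError("Invalid padding strategy:" + str(padding_side))


assert pad_global_attention_mask([0, 5, 7, 2, 1, 1], [1, 0, 0, 0]) == [1, 0, 0, 0, -1, -1]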
212
import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class a_ ( a__ , a__ , a__ , unittest.TestCase ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = StableUnCLIPImgaImgPipeline __SCREAMING_SNAKE_CASE : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS __SCREAMING_SNAKE_CASE : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __SCREAMING_SNAKE_CASE : Any = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess __SCREAMING_SNAKE_CASE : Tuple = frozenset([] ) def __lowerCAmelCase ( self ) ->Union[str, Any]: SCREAMING_SNAKE_CASE : Optional[Any] = 32 SCREAMING_SNAKE_CASE : Tuple = embedder_hidden_size # image encoding components SCREAMING_SNAKE_CASE : int = CLIPImageProcessor(crop_size=32 , size=32 ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[int] = CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=_lowerCamelCase , projection_dim=_lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) ) # regular denoising components torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : int = StableUnCLIPImageNormalizer(embedding_dim=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Tuple = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=_lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowerCamelCase , layers_per_block=1 , upcast_attention=_lowerCamelCase , use_linear_projection=_lowerCamelCase , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[int] = DDIMScheduler( beta_schedule='''scaled_linear''' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type='''v_prediction''' , set_alpha_to_one=_lowerCamelCase , steps_offset=1 , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Dict = AutoencoderKL() SCREAMING_SNAKE_CASE : 
Optional[Any] = { # image encoding components '''feature_extractor''': feature_extractor, '''image_encoder''': image_encoder.eval(), # image noising components '''image_normalizer''': image_normalizer.eval(), '''image_noising_scheduler''': image_noising_scheduler, # regular denoising components '''tokenizer''': tokenizer, '''text_encoder''': text_encoder.eval(), '''unet''': unet.eval(), '''scheduler''': scheduler, '''vae''': vae.eval(), } return components def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 , _lowerCamelCase=True ) ->Optional[int]: if str(_lowerCamelCase ).startswith('''mps''' ): SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(_lowerCamelCase ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) if pil_image: SCREAMING_SNAKE_CASE : Any = input_image * 0.5 + 0.5 SCREAMING_SNAKE_CASE : int = input_image.clamp(0 , 1 ) SCREAMING_SNAKE_CASE : Union[str, Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() SCREAMING_SNAKE_CASE : List[str] = DiffusionPipeline.numpy_to_pil(_lowerCamelCase )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def __lowerCAmelCase ( self ) ->Tuple: SCREAMING_SNAKE_CASE : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : Tuple = StableUnCLIPImgaImgPipeline(**_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = sd_pipe.to(_lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_inputs(_lowerCamelCase ) inputs.update({'''image_embeds''': None} ) SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe(**_lowerCamelCase ).images SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE : Tuple = np.array([0.3_8_7_2, 0.7_2_2_4, 0.5_6_0_1, 0.4_7_4_1, 0.6_8_7_2, 0.5_8_1_4, 0.4_6_3_6, 0.3_8_6_7, 0.5_0_7_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowerCAmelCase ( self ) ->Tuple: SCREAMING_SNAKE_CASE : str = torch_device in ['''cpu''', '''mps'''] self._test_attention_slicing_forward_pass(test_max_difference=_lowerCamelCase ) def __lowerCAmelCase ( self ) ->List[Any]: SCREAMING_SNAKE_CASE : Tuple = torch_device in ['''cpu''', '''mps'''] self._test_inference_batch_single_identical(test_max_difference=_lowerCamelCase ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __lowerCAmelCase ( self ) ->Optional[int]: self._test_xformers_attention_forwardGenerator_pass(test_max_difference=_lowerCamelCase ) @slow @require_torch_gpu class a_ ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self ) ->int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) ->Tuple: SCREAMING_SNAKE_CASE : List[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' ) SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy( 
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' ) SCREAMING_SNAKE_CASE : List[Any] = StableUnCLIPImgaImgPipeline.from_pretrained( '''fusing/stable-unclip-2-1-l-img2img''' , torch_dtype=torch.floataa ) pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 ) SCREAMING_SNAKE_CASE : Tuple = pipe(_lowerCamelCase , '''anime turle''' , generator=_lowerCamelCase , output_type='''np''' ) SCREAMING_SNAKE_CASE : List[str] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase ) def __lowerCAmelCase ( self ) ->Optional[Any]: SCREAMING_SNAKE_CASE : List[str] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' ) SCREAMING_SNAKE_CASE : Optional[int] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' ) SCREAMING_SNAKE_CASE : List[str] = StableUnCLIPImgaImgPipeline.from_pretrained( '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa ) pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 ) SCREAMING_SNAKE_CASE : Tuple = pipe(_lowerCamelCase , '''anime turle''' , generator=_lowerCamelCase , output_type='''np''' ) SCREAMING_SNAKE_CASE : List[str] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase ) def __lowerCAmelCase ( self ) ->Any: SCREAMING_SNAKE_CASE : Union[str, Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() SCREAMING_SNAKE_CASE : str = StableUnCLIPImgaImgPipeline.from_pretrained( '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : Dict = pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() SCREAMING_SNAKE_CASE : Dict = pipe( _lowerCamelCase , '''anime turtle''' , num_inference_steps=2 , output_type='''np''' , ) SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
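One detail worth isolating from the tests above: seeding is device-aware because MPS does not support a device-bound torch.Generator. A small sketch of that helper; the function name is assumed.

import torch


def make_generator(device, seed: int = 0) -> torch.Generator:
    """Seed deterministically per device; MPS cannot host a device-bound Generator."""
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # returns the (reseeded) default CPU generator
    return torch.Generator(device=device).manual_seed(seed)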
313
0
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import OwlViTImageProcessor, OwlViTProcessor @require_vision class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCamelCase ( self : Tuple): '''simple docstring''' __lowercase =tempfile.mkdtemp() # fmt: off __lowercase =['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on __lowercase =dict(zip(_lowerCamelCase , range(len(_lowerCamelCase)))) __lowercase =['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] __lowercase ={'''unk_token''': '''<unk>'''} __lowercase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file']) __lowercase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file']) with open(self.vocab_file , 'w' , encoding='utf-8') as fp: fp.write(json.dumps(_lowerCamelCase) + '\n') with open(self.merges_file , 'w' , encoding='utf-8') as fp: fp.write('\n'.join(_lowerCamelCase)) __lowercase ={ '''do_resize''': True, '''size''': 2_0, '''do_center_crop''': True, '''crop_size''': 1_8, '''do_normalize''': True, '''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073], '''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711], } __lowercase =os.path.join(self.tmpdirname , _lowerCamelCase) with open(self.image_processor_file , 'w' , encoding='utf-8') as fp: json.dump(_lowerCamelCase , _lowerCamelCase) def __lowerCamelCase ( self : str , **_lowerCAmelCase : Union[str, Any]): '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='!' , **_lowerCamelCase) def __lowerCamelCase ( self : Optional[Any] , **_lowerCAmelCase : Any): '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='!' 
, **_lowerCamelCase) def __lowerCamelCase ( self : Any , **_lowerCAmelCase : Optional[int]): '''simple docstring''' return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase) def __lowerCamelCase ( self : Tuple): '''simple docstring''' shutil.rmtree(self.tmpdirname) def __lowerCamelCase ( self : Dict): '''simple docstring''' __lowercase =[np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta)] __lowercase =[Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1)) for x in image_inputs] return image_inputs def __lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __lowercase =self.get_tokenizer() __lowercase =self.get_rust_tokenizer() __lowercase =self.get_image_processor() __lowercase =OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase) processor_slow.save_pretrained(self.tmpdirname) __lowercase =OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase) __lowercase =OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase) processor_fast.save_pretrained(self.tmpdirname) __lowercase =OwlViTProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab()) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab()) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab()) self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase) self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string()) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string()) self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase) self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase) def __lowerCamelCase ( self : Optional[int]): '''simple docstring''' __lowercase =OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) __lowercase =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)') __lowercase =self.get_image_processor(do_normalize=_lowerCamelCase) __lowercase =OwlViTProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_lowerCamelCase) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer , _lowerCamelCase) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , _lowerCamelCase) def __lowerCamelCase ( self : Optional[int]): '''simple docstring''' __lowercase =self.get_image_processor() __lowercase =self.get_tokenizer() __lowercase =OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase) __lowercase =self.prepare_image_inputs() __lowercase =image_processor(_lowerCamelCase , return_tensors='np') __lowercase =processor(images=_lowerCamelCase , return_tensors='np') for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2) def __lowerCamelCase ( self : Dict): '''simple docstring''' __lowercase =self.get_image_processor() __lowercase =self.get_tokenizer() __lowercase =OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase) __lowercase ='''lower newer''' __lowercase =processor(text=_lowerCamelCase , return_tensors='np') 
__lowercase =tokenizer(_lowerCamelCase , return_tensors='np') for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist()) def __lowerCamelCase ( self : List[str]): '''simple docstring''' __lowercase =self.get_image_processor() __lowercase =self.get_tokenizer() __lowercase =OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase) __lowercase ='''lower newer''' __lowercase =self.prepare_image_inputs() __lowercase =processor(text=_lowerCamelCase , images=_lowerCamelCase) self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask', 'pixel_values']) # test if it raises when no input is passed with pytest.raises(_lowerCamelCase): processor() def __lowerCamelCase ( self : int): '''simple docstring''' __lowercase ='''google/owlvit-base-patch32''' __lowercase =OwlViTProcessor.from_pretrained(_lowerCamelCase) __lowercase =['''cat''', '''nasa badge'''] __lowercase =processor(text=_lowerCamelCase) __lowercase =1_6 self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask']) self.assertEqual(inputs['input_ids'].shape , (2, seq_length)) # test if it raises when no input is passed with pytest.raises(_lowerCamelCase): processor() def __lowerCamelCase ( self : Tuple): '''simple docstring''' __lowercase ='''google/owlvit-base-patch32''' __lowercase =OwlViTProcessor.from_pretrained(_lowerCamelCase) __lowercase =[['''cat''', '''nasa badge'''], ['''person''']] __lowercase =processor(text=_lowerCamelCase) __lowercase =1_6 __lowercase =len(_lowerCamelCase) __lowercase =max([len(_lowerCamelCase) for texts in input_texts]) self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask']) self.assertEqual(inputs['input_ids'].shape , (batch_size * num_max_text_queries, seq_length)) # test if it raises when no input is passed with pytest.raises(_lowerCamelCase): processor() def __lowerCamelCase ( self : str): '''simple docstring''' __lowercase ='''google/owlvit-base-patch32''' __lowercase =OwlViTProcessor.from_pretrained(_lowerCamelCase) __lowercase =['''cat''', '''nasa badge'''] __lowercase =processor(text=_lowerCamelCase) __lowercase =1_6 __lowercase =inputs['''input_ids'''] __lowercase =[ [4_9_4_0_6, 2_3_6_8, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_9_4_0_6, 6_8_4_1, 1_1_3_0_1, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask']) self.assertEqual(inputs['input_ids'].shape , (2, seq_length)) self.assertListEqual(list(input_ids[0]) , predicted_ids[0]) self.assertListEqual(list(input_ids[1]) , predicted_ids[1]) def __lowerCamelCase ( self : Tuple): '''simple docstring''' __lowercase =self.get_image_processor() __lowercase =self.get_tokenizer() __lowercase =OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase) __lowercase =self.prepare_image_inputs() __lowercase =self.prepare_image_inputs() __lowercase =processor(images=_lowerCamelCase , query_images=_lowerCamelCase) self.assertListEqual(list(inputs.keys()) , ['query_pixel_values', 'pixel_values']) # test if it raises when no input is passed with pytest.raises(_lowerCamelCase): processor() def __lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __lowercase =self.get_image_processor() __lowercase =self.get_tokenizer() __lowercase =OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase) __lowercase =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowercase =processor.batch_decode(_lowerCamelCase) __lowercase 
=tokenizer.batch_decode(_lowerCamelCase) self.assertListEqual(_lowerCamelCase , _lowerCamelCase)
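The save/load assertions above reduce to a round-trip invariant. A compact sketch, assuming any processor object that exposes save_pretrained/from_pretrained plus tokenizer and image_processor attributes; the helper name is illustrative.

import tempfile


def processor_roundtrips(processor, processor_cls) -> bool:
    """Check that save_pretrained / from_pretrained preserves the processor, as in the tests above."""
    with tempfile.TemporaryDirectory() as tmpdir:
        processor.save_pretrained(tmpdir)
        reloaded = processor_cls.from_pretrained(tmpdir)
        return (
            processor.tokenizer.get_vocab() == reloaded.tokenizer.get_vocab()
            and processor.image_processor.to_json_string() == reloaded.image_processor.to_json_string()
        )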
166
from abc import ABC, abstractmethod from typing import List, Optional class a_ ( a__ ): """simple docstring""" def __init__( self ) ->List[str]: # test for the above condition self.test() def __lowerCAmelCase ( self ) ->List[str]: SCREAMING_SNAKE_CASE : int = 0 SCREAMING_SNAKE_CASE : Union[str, Any] = False while not completed: if counter == 1: self.reset() SCREAMING_SNAKE_CASE : List[Any] = self.advance() if not self.does_advance(_lowerCamelCase ): raise Exception( '''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.update(_lowerCamelCase ) counter += 1 if counter > 1_0000: raise Exception('''update() does not fulfill the constraint.''' ) if self.remaining() != 0: raise Exception('''Custom Constraint is not defined correctly.''' ) @abstractmethod def __lowerCAmelCase ( self ) ->Optional[int]: raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) @abstractmethod def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]: raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) @abstractmethod def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]: raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) @abstractmethod def __lowerCAmelCase ( self ) ->Optional[Any]: raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) @abstractmethod def __lowerCAmelCase ( self ) ->Union[str, Any]: raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) @abstractmethod def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->Any: raise NotImplementedError( F"""{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.""" ) class a_ ( a__ ): """simple docstring""" def __init__( self , _lowerCamelCase ) ->int: super(_lowerCamelCase , self ).__init__() if not isinstance(_lowerCamelCase , _lowerCamelCase ) or len(_lowerCamelCase ) == 0: raise ValueError(F"""`token_ids` has to be a non-empty list, but is {token_ids}.""" ) if any((not isinstance(_lowerCamelCase , _lowerCamelCase ) or token_id < 0) for token_id in token_ids ): raise ValueError(F"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" ) SCREAMING_SNAKE_CASE : Optional[Any] = token_ids SCREAMING_SNAKE_CASE : Union[str, Any] = len(self.token_ids ) SCREAMING_SNAKE_CASE : Any = -1 # the index of the currently fulfilled step SCREAMING_SNAKE_CASE : Any = False def __lowerCAmelCase ( self ) ->List[Any]: if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]: if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCamelCase )}""" ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[str]: if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCamelCase )}""" ) SCREAMING_SNAKE_CASE : str = False SCREAMING_SNAKE_CASE : Any = False SCREAMING_SNAKE_CASE : List[Any] = False if self.does_advance(_lowerCamelCase ): self.fulfilled_idx += 1 SCREAMING_SNAKE_CASE : str = True if self.fulfilled_idx == (self.seqlen - 1): SCREAMING_SNAKE_CASE : Any = True SCREAMING_SNAKE_CASE : Union[str, Any] = completed else: # failed to make progress. 
SCREAMING_SNAKE_CASE : Dict = True self.reset() return stepped, completed, reset def __lowerCAmelCase ( self ) ->List[Any]: SCREAMING_SNAKE_CASE : List[Any] = False SCREAMING_SNAKE_CASE : Union[str, Any] = 0 def __lowerCAmelCase ( self ) ->Any: return self.seqlen - (self.fulfilled_idx + 1) def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->Dict: SCREAMING_SNAKE_CASE : Any = PhrasalConstraint(self.token_ids ) if stateful: SCREAMING_SNAKE_CASE : Dict = self.seqlen SCREAMING_SNAKE_CASE : int = self.fulfilled_idx SCREAMING_SNAKE_CASE : Tuple = self.completed return new_constraint class a_ : """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase=True ) ->Dict: SCREAMING_SNAKE_CASE : Any = max([len(_lowerCamelCase ) for one in nested_token_ids] ) SCREAMING_SNAKE_CASE : List[str] = {} for token_ids in nested_token_ids: SCREAMING_SNAKE_CASE : Optional[Any] = root for tidx, token_id in enumerate(_lowerCamelCase ): if token_id not in level: SCREAMING_SNAKE_CASE : Any = {} SCREAMING_SNAKE_CASE : Tuple = level[token_id] if no_subsets and self.has_subsets(_lowerCamelCase , _lowerCamelCase ): raise ValueError( '''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is''' F""" {nested_token_ids}.""" ) SCREAMING_SNAKE_CASE : List[Any] = root def __lowerCAmelCase ( self , _lowerCamelCase ) ->int: SCREAMING_SNAKE_CASE : List[Any] = self.trie for current_token in current_seq: SCREAMING_SNAKE_CASE : int = start[current_token] SCREAMING_SNAKE_CASE : Optional[int] = list(start.keys() ) return next_tokens def __lowerCAmelCase ( self , _lowerCamelCase ) ->Dict: SCREAMING_SNAKE_CASE : Any = self.next_tokens(_lowerCamelCase ) return len(_lowerCamelCase ) == 0 def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]: SCREAMING_SNAKE_CASE : Any = list(root.values() ) if len(_lowerCamelCase ) == 0: return 1 else: return sum([self.count_leaves(_lowerCamelCase ) for nn in next_nodes] ) def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict: SCREAMING_SNAKE_CASE : List[str] = self.count_leaves(_lowerCamelCase ) return len(_lowerCamelCase ) != leaf_count class a_ ( a__ ): """simple docstring""" def __init__( self , _lowerCamelCase ) ->str: super(_lowerCamelCase , self ).__init__() if not isinstance(_lowerCamelCase , _lowerCamelCase ) or len(_lowerCamelCase ) == 0: raise ValueError(F"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" ) if any(not isinstance(_lowerCamelCase , _lowerCamelCase ) for token_ids in nested_token_ids ): raise ValueError(F"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" ) if any( any((not isinstance(_lowerCamelCase , _lowerCamelCase ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( F"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" ) SCREAMING_SNAKE_CASE : List[Any] = DisjunctiveTrie(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Any = nested_token_ids SCREAMING_SNAKE_CASE : Optional[int] = self.trie.max_height SCREAMING_SNAKE_CASE : Union[str, Any] = [] SCREAMING_SNAKE_CASE : Optional[int] = False def __lowerCAmelCase ( self ) ->int: SCREAMING_SNAKE_CASE : str = self.trie.next_tokens(self.current_seq ) if len(_lowerCamelCase ) == 0: return None else: return token_list def __lowerCAmelCase ( self , _lowerCamelCase ) ->Dict: if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type 
{type(_lowerCamelCase )}""" ) SCREAMING_SNAKE_CASE : List[str] = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def __lowerCAmelCase ( self , _lowerCamelCase ) ->Any: if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_lowerCamelCase )}""" ) SCREAMING_SNAKE_CASE : int = False SCREAMING_SNAKE_CASE : List[Any] = False SCREAMING_SNAKE_CASE : Union[str, Any] = False if self.does_advance(_lowerCamelCase ): self.current_seq.append(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = True else: SCREAMING_SNAKE_CASE : Dict = True self.reset() SCREAMING_SNAKE_CASE : Any = self.trie.reached_leaf(self.current_seq ) SCREAMING_SNAKE_CASE : List[Any] = completed return stepped, completed, reset def __lowerCAmelCase ( self ) ->Optional[Any]: SCREAMING_SNAKE_CASE : Any = False SCREAMING_SNAKE_CASE : List[Any] = [] def __lowerCAmelCase ( self ) ->Optional[Any]: if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->List[str]: SCREAMING_SNAKE_CASE : str = DisjunctiveConstraint(self.token_ids ) if stateful: SCREAMING_SNAKE_CASE : str = self.seqlen SCREAMING_SNAKE_CASE : int = self.current_seq SCREAMING_SNAKE_CASE : Optional[int] = self.completed return new_constraint class a_ : """simple docstring""" def __init__( self , _lowerCamelCase ) ->Union[str, Any]: SCREAMING_SNAKE_CASE : List[Any] = constraints # max # of steps required to fulfill a given constraint SCREAMING_SNAKE_CASE : str = max([c.seqlen for c in constraints] ) SCREAMING_SNAKE_CASE : List[str] = len(_lowerCamelCase ) SCREAMING_SNAKE_CASE : int = False self.init_state() def __lowerCAmelCase ( self ) ->int: SCREAMING_SNAKE_CASE : Any = [] SCREAMING_SNAKE_CASE : List[Any] = None SCREAMING_SNAKE_CASE : Tuple = [constraint.copy(stateful=_lowerCamelCase ) for constraint in self.constraints] def __lowerCAmelCase ( self ) ->str: SCREAMING_SNAKE_CASE : str = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def __lowerCAmelCase ( self ) ->Optional[int]: SCREAMING_SNAKE_CASE : Tuple = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" SCREAMING_SNAKE_CASE : Optional[int] = constraint.advance() if isinstance(_lowerCamelCase , _lowerCamelCase ): token_list.append(_lowerCamelCase ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): token_list.extend(_lowerCamelCase ) else: SCREAMING_SNAKE_CASE : List[str] = self.inprogress_constraint.advance() if isinstance(_lowerCamelCase , _lowerCamelCase ): token_list.append(_lowerCamelCase ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): token_list.extend(_lowerCamelCase ) if len(_lowerCamelCase ) == 0: return None else: return token_list def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]: self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.add(_lowerCamelCase ) # the entire list of constraints are fulfilled if self.completed: break def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[Any]: if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise ValueError(F"""`token_id` should be an 
`int`, but is `{token_id}`.""" ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = False, False if self.completed: SCREAMING_SNAKE_CASE : List[str] = True SCREAMING_SNAKE_CASE : Optional[int] = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.inprogress_constraint.update(_lowerCamelCase ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : Optional[int] = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) SCREAMING_SNAKE_CASE : str = None if len(self.pending_constraints ) == 0: # we're done! SCREAMING_SNAKE_CASE : Optional[Any] = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(_lowerCamelCase ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = pending_constraint.update(_lowerCamelCase ) if not stepped: raise Exception( '''`constraint.update(token_id)` is not yielding incremental progress, ''' '''even though `constraint.does_advance(token_id)` is true.''' ) if complete: self.complete_constraints.append(_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = None if not complete and stepped: SCREAMING_SNAKE_CASE : Optional[Any] = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". SCREAMING_SNAKE_CASE : Union[str, Any] = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. SCREAMING_SNAKE_CASE : str = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def __lowerCAmelCase ( self , _lowerCamelCase=True ) ->str: SCREAMING_SNAKE_CASE : Dict = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: SCREAMING_SNAKE_CASE : str = [ constraint.copy(stateful=_lowerCamelCase ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: SCREAMING_SNAKE_CASE : Optional[int] = self.inprogress_constraint.copy(stateful=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = [constraint.copy() for constraint in self.pending_constraints] return new_state
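The DisjunctiveTrie used by the disjunctive constraint above is a nested dict keyed by token id. A minimal standalone sketch of the same structure; the function names are assumed.

def build_trie(nested_token_ids):
    """Nested-dict trie over token-id sequences, as in DisjunctiveTrie above."""
    root = {}
    for token_ids in nested_token_ids:
        level = root
        for token_id in token_ids:
            level = level.setdefault(token_id, {})
    return root


def next_tokens(trie, current_seq):
    """Tokens that can extend `current_seq`; an empty list means a leaf was reached."""
    level = trie
    for token_id in current_seq:
        level = level[token_id]
    return list(level.keys())


trie = build_trie([[1, 2, 3], [1, 2, 4], [5]])
assert next_tokens(trie, [1, 2]) == [3, 4]
assert next_tokens(trie, [5]) == []  # reached_leaf: no continuation remains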
313
0
"""simple docstring""" from math import pow, sqrt def _snake_case ( *lowercase__ ): _lowerCamelCase : Dict = len(a__ ) > 0 and all(value > 0.0 for value in values ) return result def _snake_case ( lowercase__ , lowercase__ ): return ( round(sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(a__ , a__ ) else ValueError('Input Error: Molar mass values must greater than 0.' ) ) def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): return ( round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(a__ , a__ , a__ ) else ValueError( 'Input Error: Molar mass and effusion rate values must greater than 0.' ) ) def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): return ( round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(a__ , a__ , a__ ) else ValueError( 'Input Error: Molar mass and effusion rate values must greater than 0.' ) ) def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): return ( round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 ) if validate(a__ , a__ , a__ ) else ValueError( 'Input Error: Molar mass and effusion rate values must greater than 0.' ) ) def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): return ( round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 ) if validate(a__ , a__ , a__ ) else ValueError( 'Input Error: Molar mass and effusion rate values must greater than 0.' ) )
96
import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def UpperCAmelCase_( a__=32 , a__=10 , a__=100 , a__=1_026 , a__=True , a__="data/tokenized_stories_train_wikitext103.jbl" , a__="igf_context_pairs.jbl" , ): """simple docstring""" set_seed(3 ) # generate train_data and objective_set SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = generate_datasets( a__ , a__ , number=a__ , min_len=1_026 , trim=a__ ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? SCREAMING_SNAKE_CASE : str = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' ) # load pretrained model SCREAMING_SNAKE_CASE : Dict = load_gpta('''gpt2''' ).to(a__ ) print('''computing perplexity on objective set''' ) SCREAMING_SNAKE_CASE : int = compute_perplexity(a__ , a__ , a__ ).item() print('''perplexity on objective set:''' , a__ ) # collect igf pairs and save to file demo.jbl collect_objective_set(a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def UpperCAmelCase_( a__ , a__=15 , a__=128 , a__=100 , a__="igf_model.pt" , ): """simple docstring""" set_seed(42 ) # Load pre-trained model SCREAMING_SNAKE_CASE : List[Any] = GPTaLMHeadModel.from_pretrained('''gpt2''' ) # Initialize secondary learner to use embedding weights of model SCREAMING_SNAKE_CASE : str = SecondaryLearner(a__ ) # Train secondary learner SCREAMING_SNAKE_CASE : Union[str, Any] = train_secondary_learner( a__ , a__ , max_epochs=a__ , batch_size=a__ , eval_freq=100 , igf_model_path=a__ , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def UpperCAmelCase_( a__ , a__ , a__ , a__=32 , a__=1_000 , a__=16 , a__=1.0 , a__=recopy_gpta , a__=None , a__=10 , a__="gpt2_finetuned.pt" , ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' ) SCREAMING_SNAKE_CASE : Optional[int] = RandomSampler(a__ ) SCREAMING_SNAKE_CASE : Dict = DataLoader(a__ , sampler=a__ ) SCREAMING_SNAKE_CASE : Tuple = max_steps // (len(a__ )) + 1 SCREAMING_SNAKE_CASE : int = 0 SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros((1, context_len) , dtype=torch.long , device=a__ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = recopy_model(a__ , a__ , a__ ) model.train() if secondary_learner is not None: secondary_learner.to(a__ ) secondary_learner.eval() SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Optional[int] = 0 SCREAMING_SNAKE_CASE : Optional[int] = [] SCREAMING_SNAKE_CASE : Tuple = [] # Compute the performance of the transformer model at the beginning SCREAMING_SNAKE_CASE : str = compute_perplexity(a__ , a__ , a__ ) test_perps.append(a__ ) print('''Test perplexity, step''' , a__ , ''':''' , a__ ) for epoch in range(int(a__ ) ): for step, example in enumerate(a__ ): torch.cuda.empty_cache() SCREAMING_SNAKE_CASE : Union[str, Any] = random.randint(0 , example.size(2 ) - context_len - 1 ) SCREAMING_SNAKE_CASE : Optional[int] = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() 
SCREAMING_SNAKE_CASE : Optional[Any] = model(a__ , labels=a__ ) SCREAMING_SNAKE_CASE : Union[str, Any] = True if secondary_learner is not None: SCREAMING_SNAKE_CASE : List[str] = secondary_learner.forward( torch.tensor(a__ , dtype=torch.long , device=a__ ).unsqueeze(0 ) )[0].item() observed_qs.append(float(a__ ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: SCREAMING_SNAKE_CASE : Dict = -1 if predicted_q < threshold: SCREAMING_SNAKE_CASE : str = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) SCREAMING_SNAKE_CASE : List[str] = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() SCREAMING_SNAKE_CASE : Any = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: SCREAMING_SNAKE_CASE : str = compute_perplexity(a__ , a__ , a__ ) test_perps.append(a__ ) print('''Test perplexity, step''' , a__ , ''':''' , a__ ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , a__ ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def UpperCAmelCase_( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' ) # Required parameters parser.add_argument( '''--data_dir''' , default=a__ , type=a__ , required=a__ , help='''The input data dir. Should contain data files for WikiText.''' , ) parser.add_argument( '''--model_name_or_path''' , default=a__ , type=a__ , required=a__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--data_file''' , type=a__ , default=a__ , help=( '''A jbl file containing tokenized data which can be split as objective dataset, ''' '''train_dataset and test_dataset.''' ) , ) parser.add_argument( '''--igf_data_file''' , type=a__ , default=a__ , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , ) parser.add_argument( '''--output_dir''' , default=a__ , type=a__ , required=a__ , help='''The output directory where the final fine-tuned model is stored.''' , ) parser.add_argument( '''--tokenizer_name''' , default=a__ , type=a__ , help='''Pretrained tokenizer name or path if not the same as model_name''' , ) parser.add_argument('''--seed''' , type=a__ , default=a__ , help='''A seed for reproducible training.''' ) parser.add_argument( '''--context_len''' , default=32 , type=a__ , help=( '''The maximum total input sequence length after tokenization. 
Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--size_objective_set''' , default=100 , type=a__ , help='''number of articles that are long enough to be used as our objective set''' , ) parser.add_argument( '''--eval_freq''' , default=100 , type=a__ , help='''secondary model evaluation is triggered at eval_freq''' ) parser.add_argument('''--max_steps''' , default=1_000 , type=a__ , help='''To calculate training epochs''' ) parser.add_argument( '''--secondary_learner_batch_size''' , default=128 , type=a__ , help='''batch size of training data for secondary learner''' , ) parser.add_argument( '''--batch_size''' , default=16 , type=a__ , help='''batch size of training data of language model(gpt2) ''' ) parser.add_argument( '''--eval_interval''' , default=10 , type=a__ , help=( '''decay the selectivity of our secondary learner filter from''' '''1 standard deviation above average to 1 below average after 10 batches''' ) , ) parser.add_argument( '''--number''' , default=100 , type=a__ , help='''The number of examples split to be used as objective_set/test_data''' ) parser.add_argument( '''--min_len''' , default=1_026 , type=a__ , help='''The minimum length of the article to be used as objective set''' ) parser.add_argument( '''--secondary_learner_max_epochs''' , default=15 , type=a__ , help='''number of epochs to train secondary learner''' ) parser.add_argument('''--trim''' , default=a__ , type=a__ , help='''truncate the example if it exceeds context length''' ) parser.add_argument( '''--threshold''' , default=1.0 , type=a__ , help=( '''The threshold value used by secondary learner to filter the train_data and allow only''' ''' informative data as input to the model''' ) , ) parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=a__ , help='''finetuned_model_name''' ) parser.add_argument( '''--recopy_model''' , default=a__ , type=a__ , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=a__ , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , ) # Load train data for secondary learner SCREAMING_SNAKE_CASE : List[Any] = joblib.load('''data/IGF_values.jbl''' ) # Train secondary learner SCREAMING_SNAKE_CASE : Tuple = training_secondary_learner( a__ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='''igf_model.pt''' , ) # load pretrained gpt2 model SCREAMING_SNAKE_CASE : Optional[Any] = GPTaLMHeadModel.from_pretrained('''gpt2''' ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = generate_datasets( context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=100 , min_len=1_026 , trim=a__ ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( a__ , a__ , a__ , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=a__ , secondary_learner=a__ , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , ) if __name__ == "__main__": main()
313
0
'''simple docstring''' from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class lowerCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : int , _UpperCAmelCase : Optional[int] = 16 , _UpperCAmelCase : List[str] = 88 , _UpperCAmelCase : Dict = None , _UpperCAmelCase : Dict = 1 , _UpperCAmelCase : List[str] = 0.0 , _UpperCAmelCase : str = 32 , _UpperCAmelCase : Union[str, Any] = None , _UpperCAmelCase : int = False , _UpperCAmelCase : List[str] = None , _UpperCAmelCase : int = None , _UpperCAmelCase : Dict = "geglu" , _UpperCAmelCase : List[str] = None , ): """simple docstring""" super().__init__() UpperCAmelCase__ = nn.ModuleList( [ TransformeraDModel( num_attention_heads=_lowerCamelCase , attention_head_dim=_lowerCamelCase , in_channels=_lowerCamelCase , num_layers=_lowerCamelCase , dropout=_lowerCamelCase , norm_num_groups=_lowerCamelCase , cross_attention_dim=_lowerCamelCase , attention_bias=_lowerCamelCase , sample_size=_lowerCamelCase , num_vector_embeds=_lowerCamelCase , activation_fn=_lowerCamelCase , num_embeds_ada_norm=_lowerCamelCase , ) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference UpperCAmelCase__ = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` UpperCAmelCase__ = [77, 2_57] # Which transformer to use to encode which condition. # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` UpperCAmelCase__ = [1, 0] def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : Any=None , _UpperCAmelCase : Dict=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : Tuple = True , ): """simple docstring""" UpperCAmelCase__ = hidden_states UpperCAmelCase__ = [] UpperCAmelCase__ = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens UpperCAmelCase__ = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] UpperCAmelCase__ = self.transformer_index_for_condition[i] UpperCAmelCase__ = self.transformers[transformer_index]( _lowerCamelCase , encoder_hidden_states=_lowerCamelCase , timestep=_lowerCamelCase , cross_attention_kwargs=_lowerCamelCase , return_dict=_lowerCamelCase , )[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] UpperCAmelCase__ = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) UpperCAmelCase__ = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=_lowerCamelCase )
346
import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def UpperCAmelCase_( a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = filter(lambda a__ : p.requires_grad , model.parameters() ) SCREAMING_SNAKE_CASE : List[Any] = sum([np.prod(p.size() ) for p in model_parameters] ) return params a__ : Any = logging.getLogger(__name__) def UpperCAmelCase_( a__ , a__ ): """simple docstring""" if metric == "rouge2": SCREAMING_SNAKE_CASE : str = '''{val_avg_rouge2:.4f}-{step_count}''' elif metric == "bleu": SCREAMING_SNAKE_CASE : List[Any] = '''{val_avg_bleu:.4f}-{step_count}''' elif metric == "em": SCREAMING_SNAKE_CASE : int = '''{val_avg_em:.4f}-{step_count}''' elif metric == "loss": SCREAMING_SNAKE_CASE : int = '''{val_avg_loss:.4f}-{step_count}''' else: raise NotImplementedError( F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this""" ''' function.''' ) SCREAMING_SNAKE_CASE : Dict = ModelCheckpoint( dirpath=a__ , filename=a__ , monitor=F"""val_{metric}""" , mode='''max''' , save_top_k=1 , every_n_epochs=1 , ) return checkpoint_callback def UpperCAmelCase_( a__ , a__ ): """simple docstring""" return EarlyStopping( monitor=F"""val_{metric}""" , mode='''min''' if '''loss''' in metric else '''max''' , patience=a__ , verbose=a__ , ) class a_ ( pl.Callback ): """simple docstring""" def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict: SCREAMING_SNAKE_CASE : List[str] = {F"""lr_group_{i}""": param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(_lowerCamelCase ) @rank_zero_only def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True ) ->None: logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" ) SCREAMING_SNAKE_CASE : Optional[int] = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} ) # Log results SCREAMING_SNAKE_CASE : List[str] = Path(pl_module.hparams.output_dir ) if type_path == "test": SCREAMING_SNAKE_CASE : Any = od / '''test_results.txt''' SCREAMING_SNAKE_CASE : Optional[int] = od / '''test_generations.txt''' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
SCREAMING_SNAKE_CASE : str = od / F"""{type_path}_results/{trainer.global_step:05d}.txt""" SCREAMING_SNAKE_CASE : Tuple = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt""" results_file.parent.mkdir(exist_ok=_lowerCamelCase ) generations_file.parent.mkdir(exist_ok=_lowerCamelCase ) with open(_lowerCamelCase , '''a+''' ) as writer: for key in sorted(_lowerCamelCase ): if key in ["log", "progress_bar", "preds"]: continue SCREAMING_SNAKE_CASE : Tuple = metrics[key] if isinstance(_lowerCamelCase , torch.Tensor ): SCREAMING_SNAKE_CASE : List[Any] = val.item() SCREAMING_SNAKE_CASE : Tuple = F"""{key}: {val:.6f}\n""" writer.write(_lowerCamelCase ) if not save_generations: return if "preds" in metrics: SCREAMING_SNAKE_CASE : Optional[Any] = '''\n'''.join(metrics['''preds'''] ) generations_file.open('''w+''' ).write(_lowerCamelCase ) @rank_zero_only def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict: try: SCREAMING_SNAKE_CASE : Any = pl_module.model.model.num_parameters() except AttributeError: SCREAMING_SNAKE_CASE : Optional[int] = pl_module.model.num_parameters() SCREAMING_SNAKE_CASE : int = count_trainable_parameters(_lowerCamelCase ) # mp stands for million parameters trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} ) @rank_zero_only def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->List[Any]: save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(_lowerCamelCase , _lowerCamelCase , '''test''' ) @rank_zero_only def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Tuple: save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
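# A hedged wiring sketch, not part of the file above. This file mirrors the
# callbacks module of the upstream transformers seq2seq examples, where the
# helpers are named get_checkpoint_callback, get_early_stopping_callback and
# Seq2SeqLoggingCallback; those upstream names are assumed here.
import pytorch_lightning as pl

def build_trainer(output_dir: str, metric: str = "rouge2") -> pl.Trainer:
    # Checkpoint on the monitored metric and stop after 3 stagnant epochs.
    callbacks = [
        Seq2SeqLoggingCallback(),
        get_checkpoint_callback(output_dir, metric),
        get_early_stopping_callback(metric, patience=3),
    ]
    return pl.Trainer(callbacks=callbacks)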
313
0
from heapq import heappop, heappush

import numpy as np


def dijkstra(grid, source, destination, allow_diagonal):
    # Shortest path on a binary grid: cells equal to 1 are walkable and every
    # step costs 1. Returns (distance, path), or (np.inf, []) if unreachable.
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
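# A small usage sketch for dijkstra() above; the grid is an example, not taken
# from this file. Cells equal to 1 are walkable and each step costs 1.
if __name__ == "__main__":
    example_grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])
    distance, path = dijkstra(example_grid, (0, 0), (2, 2), allow_diagonal=False)
    assert distance == 4.0
    assert path == [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]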
110
import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def UpperCAmelCase_( a__ ): """simple docstring""" if ( (cp >= 0x4_E00 and cp <= 0x9_FFF) or (cp >= 0x3_400 and cp <= 0x4_DBF) # or (cp >= 0x20_000 and cp <= 0x2A_6DF) # or (cp >= 0x2A_700 and cp <= 0x2B_73F) # or (cp >= 0x2B_740 and cp <= 0x2B_81F) # or (cp >= 0x2B_820 and cp <= 0x2C_EAF) # or (cp >= 0xF_900 and cp <= 0xF_AFF) or (cp >= 0x2F_800 and cp <= 0x2F_A1F) # ): # return True return False def UpperCAmelCase_( a__ ): """simple docstring""" for char in word: SCREAMING_SNAKE_CASE : str = ord(a__ ) if not _is_chinese_char(a__ ): return 0 return 1 def UpperCAmelCase_( a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = set() for token in tokens: SCREAMING_SNAKE_CASE : str = len(a__ ) > 1 and is_chinese(a__ ) if chinese_word: word_set.add(a__ ) SCREAMING_SNAKE_CASE : str = list(a__ ) return word_list def UpperCAmelCase_( a__ , a__ ): """simple docstring""" if not chinese_word_set: return bert_tokens SCREAMING_SNAKE_CASE : List[str] = max([len(a__ ) for w in chinese_word_set] ) SCREAMING_SNAKE_CASE : Tuple = bert_tokens SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = 0, len(a__ ) while start < end: SCREAMING_SNAKE_CASE : Dict = True if is_chinese(bert_word[start] ): SCREAMING_SNAKE_CASE : Optional[int] = min(end - start , a__ ) for i in range(a__ , 1 , -1 ): SCREAMING_SNAKE_CASE : Optional[int] = ''''''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): SCREAMING_SNAKE_CASE : Optional[int] = '''##''' + bert_word[j] SCREAMING_SNAKE_CASE : List[str] = start + i SCREAMING_SNAKE_CASE : Optional[Any] = False break if single_word: start += 1 return bert_word def UpperCAmelCase_( a__ , a__ , a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = [] for i in range(0 , len(a__ ) , 100 ): SCREAMING_SNAKE_CASE : Optional[Any] = ltp_tokenizer.seg(lines[i : i + 100] )[0] SCREAMING_SNAKE_CASE : Union[str, Any] = [get_chinese_word(a__ ) for r in res] ltp_res.extend(a__ ) assert len(a__ ) == len(a__ ) SCREAMING_SNAKE_CASE : Any = [] for i in range(0 , len(a__ ) , 100 ): SCREAMING_SNAKE_CASE : int = bert_tokenizer(lines[i : i + 100] , add_special_tokens=a__ , truncation=a__ , max_length=512 ) bert_res.extend(res['''input_ids'''] ) assert len(a__ ) == len(a__ ) SCREAMING_SNAKE_CASE : int = [] for input_ids, chinese_word in zip(a__ , a__ ): SCREAMING_SNAKE_CASE : List[Any] = [] for id in input_ids: SCREAMING_SNAKE_CASE : List[Any] = bert_tokenizer._convert_id_to_token(a__ ) input_tokens.append(a__ ) SCREAMING_SNAKE_CASE : List[str] = add_sub_symbol(a__ , a__ ) SCREAMING_SNAKE_CASE : Dict = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(a__ ): if token[:2] == "##": SCREAMING_SNAKE_CASE : Optional[int] = token[2:] # save chinese tokens' pos if len(a__ ) == 1 and _is_chinese_char(ord(a__ ) ): ref_id.append(a__ ) ref_ids.append(a__ ) assert len(a__ ) == len(a__ ) return ref_ids def UpperCAmelCase_( a__ ): """simple docstring""" with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f: SCREAMING_SNAKE_CASE : List[str] = f.readlines() SCREAMING_SNAKE_CASE : Union[str, Any] = [line.strip() for line in data if len(a__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' SCREAMING_SNAKE_CASE : List[str] = LTP(args.ltp ) # faster in GPU device SCREAMING_SNAKE_CASE : int = BertTokenizer.from_pretrained(args.bert ) SCREAMING_SNAKE_CASE : int = prepare_ref(a__ , a__ , a__ ) with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f: SCREAMING_SNAKE_CASE : Tuple = [json.dumps(a__ ) + '''\n''' for ref in ref_ids] f.writelines(a__ ) if __name__ == "__main__": a__ : int = argparse.ArgumentParser(description='''prepare_chinese_ref''') parser.add_argument( '''--file_name''', type=str, default='''./resources/chinese-demo.txt''', help='''file need process, same as training data in lm''', ) parser.add_argument( '''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path''' ) parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''') parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''') a__ : int = parser.parse_args() main(args)
313
0
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @property def _UpperCamelCase ( self ) -> Optional[int]: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model @property def _UpperCamelCase ( self ) -> Tuple: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ = VQModel( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , ) return model @property def _UpperCamelCase ( self ) -> Dict: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModel(_lowerCamelCase ) def _UpperCamelCase ( self ) -> str: SCREAMING_SNAKE_CASE_ = self.dummy_uncond_unet SCREAMING_SNAKE_CASE_ = DDIMScheduler() SCREAMING_SNAKE_CASE_ = self.dummy_vq_model SCREAMING_SNAKE_CASE_ = LDMPipeline(unet=_lowerCamelCase , vqvae=_lowerCamelCase , scheduler=_lowerCamelCase ) ldm.to(_lowerCamelCase ) ldm.set_progress_bar_config(disable=_lowerCamelCase ) SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ = ldm(generator=_lowerCamelCase , num_inference_steps=2 , output_type='''numpy''' ).images SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ = ldm(generator=_lowerCamelCase , num_inference_steps=2 , output_type='''numpy''' , return_dict=_lowerCamelCase )[0] SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE_ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) SCREAMING_SNAKE_CASE_ = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] ) SCREAMING_SNAKE_CASE_ = 1E-2 if torch_device != '''mps''' else 3E-2 assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance @slow @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def _UpperCamelCase ( self ) -> List[str]: SCREAMING_SNAKE_CASE_ = LDMPipeline.from_pretrained('''CompVis/ldm-celebahq-256''' ) ldm.to(_lowerCamelCase ) ldm.set_progress_bar_config(disable=_lowerCamelCase ) SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ = ldm(generator=_lowerCamelCase , num_inference_steps=5 , output_type='''numpy''' ).images SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) SCREAMING_SNAKE_CASE_ = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] ) SCREAMING_SNAKE_CASE_ = 1E-2 if torch_device != '''mps''' else 3E-2 assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
299
import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def UpperCAmelCase_( a__ , a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = F"""{sampling_rate}""" SCREAMING_SNAKE_CASE : Tuple = '''1''' SCREAMING_SNAKE_CASE : Union[str, Any] = '''f32le''' SCREAMING_SNAKE_CASE : List[Any] = [ '''ffmpeg''', '''-i''', '''pipe:0''', '''-ac''', ac, '''-ar''', ar, '''-f''', format_for_conversion, '''-hide_banner''', '''-loglevel''', '''quiet''', '''pipe:1''', ] try: with subprocess.Popen(a__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: SCREAMING_SNAKE_CASE : Tuple = ffmpeg_process.communicate(a__ ) except FileNotFoundError as error: raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error SCREAMING_SNAKE_CASE : Optional[Any] = output_stream[0] SCREAMING_SNAKE_CASE : Any = np.frombuffer(a__ , np.floataa ) if audio.shape[0] == 0: raise ValueError('''Malformed soundfile''' ) return audio def UpperCAmelCase_( a__ , a__ , a__ = "f32le" , ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = F"""{sampling_rate}""" SCREAMING_SNAKE_CASE : Dict = '''1''' if format_for_conversion == "s16le": SCREAMING_SNAKE_CASE : List[Any] = 2 elif format_for_conversion == "f32le": SCREAMING_SNAKE_CASE : Dict = 4 else: raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = platform.system() if system == "Linux": SCREAMING_SNAKE_CASE : Dict = '''alsa''' SCREAMING_SNAKE_CASE : Any = '''default''' elif system == "Darwin": SCREAMING_SNAKE_CASE : Union[str, Any] = '''avfoundation''' SCREAMING_SNAKE_CASE : Optional[int] = ''':0''' elif system == "Windows": SCREAMING_SNAKE_CASE : int = '''dshow''' SCREAMING_SNAKE_CASE : Any = '''default''' SCREAMING_SNAKE_CASE : Union[str, Any] = [ '''ffmpeg''', '''-f''', format_, '''-i''', input_, '''-ac''', ac, '''-ar''', ar, '''-f''', format_for_conversion, '''-fflags''', '''nobuffer''', '''-hide_banner''', '''-loglevel''', '''quiet''', '''pipe:1''', ] SCREAMING_SNAKE_CASE : List[str] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample SCREAMING_SNAKE_CASE : List[Any] = _ffmpeg_stream(a__ , a__ ) for item in iterator: yield item def UpperCAmelCase_( a__ , a__ , a__ = None , a__ = None , a__ = "f32le" , ): """simple docstring""" if stream_chunk_s is not None: SCREAMING_SNAKE_CASE : Tuple = stream_chunk_s else: SCREAMING_SNAKE_CASE : List[str] = chunk_length_s SCREAMING_SNAKE_CASE : Union[str, Any] = ffmpeg_microphone(a__ , a__ , format_for_conversion=a__ ) if format_for_conversion == "s16le": SCREAMING_SNAKE_CASE : Optional[int] = np.intaa SCREAMING_SNAKE_CASE : List[Any] = 2 elif format_for_conversion == "f32le": SCREAMING_SNAKE_CASE : Any = np.floataa SCREAMING_SNAKE_CASE : Union[str, Any] = 4 else: raise ValueError(F"""Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`""" ) if stride_length_s is None: SCREAMING_SNAKE_CASE : Optional[Any] = chunk_length_s / 6 SCREAMING_SNAKE_CASE : Dict = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(a__ , (int, float) ): SCREAMING_SNAKE_CASE : List[Any] = [stride_length_s, stride_length_s] SCREAMING_SNAKE_CASE : Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample SCREAMING_SNAKE_CASE : int = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample SCREAMING_SNAKE_CASE : Union[str, Any] = datetime.datetime.now() SCREAMING_SNAKE_CASE : Dict = datetime.timedelta(seconds=a__ ) for item in chunk_bytes_iter(a__ , a__ , stride=(stride_left, stride_right) , stream=a__ ): # Put everything back in numpy scale SCREAMING_SNAKE_CASE : Dict = np.frombuffer(item['''raw'''] , dtype=a__ ) SCREAMING_SNAKE_CASE : Optional[Any] = ( item['''stride'''][0] // size_of_sample, item['''stride'''][1] // size_of_sample, ) SCREAMING_SNAKE_CASE : Any = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! SKIP continue yield item def UpperCAmelCase_( a__ , a__ , a__ , a__ = False ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = b'''''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = stride if stride_left + stride_right >= chunk_len: raise ValueError( F"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = 0 for raw in iterator: acc += raw if stream and len(a__ ) < chunk_len: SCREAMING_SNAKE_CASE : List[str] = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(a__ ) >= chunk_len: # We are flushing the accumulator SCREAMING_SNAKE_CASE : str = (_stride_left, stride_right) SCREAMING_SNAKE_CASE : List[str] = {'''raw''': acc[:chunk_len], '''stride''': stride} if stream: SCREAMING_SNAKE_CASE : List[str] = False yield item SCREAMING_SNAKE_CASE : Dict = stride_left SCREAMING_SNAKE_CASE : int = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(a__ ) > stride_left: SCREAMING_SNAKE_CASE : Optional[Any] = {'''raw''': acc, '''stride''': (_stride_left, 0)} if stream: SCREAMING_SNAKE_CASE : Union[str, Any] = False yield item def UpperCAmelCase_( a__ , a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = 2**24 # 16Mo try: with subprocess.Popen(a__ , stdout=subprocess.PIPE , bufsize=a__ ) as ffmpeg_process: while True: SCREAMING_SNAKE_CASE : str = ffmpeg_process.stdout.read(a__ ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
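# A pure-Python sketch of the rechunking helper above; it is named
# chunk_bytes_iter in the upstream transformers audio utilities that this file
# mirrors, and that upstream name is an assumption here. No ffmpeg is needed.
def _byte_source():
    yield b"abcd"
    yield b"efgh"
    yield b"ijkl"

for item in chunk_bytes_iter(_byte_source(), chunk_len=6, stride=(0, 2)):
    print(item["raw"], item["stride"])
# Prints b"abcdef" (0, 2), then b"efghij" (0, 2) because the 2-byte right
# stride is re-read, then the leftover tail b"ijkl" with stride (0, 0).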
313
0
'''simple docstring''' import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class UpperCAmelCase : '''simple docstring''' @staticmethod def _lowerCAmelCase( *__lowerCAmelCase , **__lowerCAmelCase ) -> str: pass @is_pipeline_test @require_vision class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @require_torch def _lowerCAmelCase( self ) -> Any: lowercase__ : Tuple = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) lowercase__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowercase__ : Any = image_classifier(_lowerCamelCase , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(_lowerCamelCase ) , [ [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}], [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}], ] , ) lowercase__ : Optional[int] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(_lowerCamelCase ) , [ [ {'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )}, ], [ {'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )}, ], [ {'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )}, ], [ {'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )}, ], [ {'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )}, ], ] , ) @require_tf def _lowerCAmelCase( self ) -> Union[str, Any]: lowercase__ : List[str] = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) lowercase__ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowercase__ : int = image_classifier(_lowerCamelCase , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(_lowerCamelCase ) , [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}] , ) lowercase__ : List[Any] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(_lowerCamelCase ) , [ [ {'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )}, ], [ {'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )}, 
{'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )}, ], [ {'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )}, ], [ {'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )}, ], [ {'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(_lowerCamelCase )}, ], ] , ) @slow @require_torch def _lowerCAmelCase( self ) -> Dict: lowercase__ : Any = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes lowercase__ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowercase__ : Optional[Any] = image_classifier(_lowerCamelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(_lowerCamelCase ) , [ {'''score''': 0.5_1_1, '''label''': '''remote'''}, {'''score''': 0.4_8_5, '''label''': '''cat'''}, {'''score''': 0.0_0_4, '''label''': '''plane'''}, ] , ) lowercase__ : int = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(_lowerCamelCase ) , [ [ {'''score''': 0.5_1_1, '''label''': '''remote'''}, {'''score''': 0.4_8_5, '''label''': '''cat'''}, {'''score''': 0.0_0_4, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def _lowerCAmelCase( self ) -> Optional[Any]: lowercase__ : Optional[int] = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes lowercase__ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowercase__ : List[Any] = image_classifier(_lowerCamelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(_lowerCamelCase ) , [ {'''score''': 0.5_1_1, '''label''': '''remote'''}, {'''score''': 0.4_8_5, '''label''': '''cat'''}, {'''score''': 0.0_0_4, '''label''': '''plane'''}, ] , ) lowercase__ : List[Any] = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(_lowerCamelCase ) , [ [ {'''score''': 0.5_1_1, '''label''': '''remote'''}, {'''score''': 0.4_8_5, '''label''': '''cat'''}, {'''score''': 0.0_0_4, '''label''': '''plane'''}, ], ] * 5 , )
198
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a__ : Tuple = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Dict = [ '''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''WavLMForAudioFrameClassification''', '''WavLMForCTC''', '''WavLMForSequenceClassification''', '''WavLMForXVector''', '''WavLMModel''', '''WavLMPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavlm import ( WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST, WavLMForAudioFrameClassification, WavLMForCTC, WavLMForSequenceClassification, WavLMForXVector, WavLMModel, WavLMPreTrainedModel, ) else: import sys a__ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
313
0
"""
Project Euler problem 25: find the index of the first Fibonacci number to
contain n digits.
"""


def fibonacci(n):
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n):
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n=1000):
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(input().strip())))
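# Quick sanity checks (not part of the original solution): the first Fibonacci
# number with 3 digits is fibonacci(12) == 144, and Project Euler 25 asks for
# the first 1000-digit term, which solution() computes by default.
assert fibonacci(12) == 144
assert fibonacci_digits_index(3) == 12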
223
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING a__ : int = logging.get_logger(__name__) a__ : Optional[Any] = { '''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''', # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class a_ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = 'deformable_detr' __SCREAMING_SNAKE_CASE : Union[str, Any] = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=3 , _lowerCamelCase=300 , _lowerCamelCase=1024 , _lowerCamelCase=6 , _lowerCamelCase=1024 , _lowerCamelCase=8 , _lowerCamelCase=6 , _lowerCamelCase=1024 , _lowerCamelCase=8 , _lowerCamelCase=0.0 , _lowerCamelCase=True , _lowerCamelCase="relu" , _lowerCamelCase=256 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1.0 , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase="sine" , _lowerCamelCase="resnet50" , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=False , _lowerCamelCase=300 , _lowerCamelCase=False , _lowerCamelCase=1 , _lowerCamelCase=5 , _lowerCamelCase=2 , _lowerCamelCase=1 , _lowerCamelCase=1 , _lowerCamelCase=5 , _lowerCamelCase=2 , _lowerCamelCase=0.1 , _lowerCamelCase=0.2_5 , _lowerCamelCase=False , **_lowerCamelCase , ) ->Optional[Any]: if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.''' ) SCREAMING_SNAKE_CASE : Dict = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): SCREAMING_SNAKE_CASE : List[Any] = backbone_config.get('''model_type''' ) SCREAMING_SNAKE_CASE : Optional[Any] = CONFIG_MAPPING[backbone_model_type] SCREAMING_SNAKE_CASE : int = config_class.from_dict(_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = use_timm_backbone SCREAMING_SNAKE_CASE : Optional[int] = backbone_config SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels SCREAMING_SNAKE_CASE : Optional[Any] = num_queries SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings SCREAMING_SNAKE_CASE : Optional[int] = d_model SCREAMING_SNAKE_CASE : str = encoder_ffn_dim SCREAMING_SNAKE_CASE : str = encoder_layers SCREAMING_SNAKE_CASE : str = encoder_attention_heads SCREAMING_SNAKE_CASE : Optional[int] = decoder_ffn_dim SCREAMING_SNAKE_CASE : int = decoder_layers SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_attention_heads SCREAMING_SNAKE_CASE : List[str] = dropout SCREAMING_SNAKE_CASE : Optional[int] = attention_dropout SCREAMING_SNAKE_CASE : str = activation_dropout SCREAMING_SNAKE_CASE : Optional[int] = activation_function SCREAMING_SNAKE_CASE : Optional[int] = init_std SCREAMING_SNAKE_CASE : List[str] = init_xavier_std SCREAMING_SNAKE_CASE : Optional[Any] = encoder_layerdrop SCREAMING_SNAKE_CASE : Union[str, Any] = auxiliary_loss SCREAMING_SNAKE_CASE : List[Any] = position_embedding_type SCREAMING_SNAKE_CASE : str = backbone SCREAMING_SNAKE_CASE : Dict = use_pretrained_backbone SCREAMING_SNAKE_CASE : Dict = dilation # deformable attributes SCREAMING_SNAKE_CASE : str = num_feature_levels SCREAMING_SNAKE_CASE : Optional[Any] = encoder_n_points SCREAMING_SNAKE_CASE : Any = decoder_n_points SCREAMING_SNAKE_CASE : str = two_stage SCREAMING_SNAKE_CASE : List[str] = two_stage_num_proposals SCREAMING_SNAKE_CASE : Dict = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher SCREAMING_SNAKE_CASE : int = class_cost SCREAMING_SNAKE_CASE : Union[str, Any] = bbox_cost SCREAMING_SNAKE_CASE : Optional[int] = giou_cost # Loss coefficients SCREAMING_SNAKE_CASE : Dict = mask_loss_coefficient SCREAMING_SNAKE_CASE : Union[str, Any] = dice_loss_coefficient SCREAMING_SNAKE_CASE : str = bbox_loss_coefficient SCREAMING_SNAKE_CASE : Tuple = giou_loss_coefficient SCREAMING_SNAKE_CASE : Optional[int] = eos_coefficient SCREAMING_SNAKE_CASE : Tuple = focal_alpha SCREAMING_SNAKE_CASE : Optional[int] = disable_custom_kernels super().__init__(is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase ) @property def __lowerCAmelCase ( self ) ->int: return self.encoder_attention_heads @property def __lowerCAmelCase ( self ) ->int: return self.d_model def __lowerCAmelCase ( self ) ->Any: SCREAMING_SNAKE_CASE : str = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: SCREAMING_SNAKE_CASE : Optional[int] = self.backbone_config.to_dict() SCREAMING_SNAKE_CASE : Any = self.__class__.model_type return output
313
0
"""simple docstring""" import warnings from ...utils import is_sklearn_available, requires_backends if is_sklearn_available(): from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef _UpperCamelCase: Optional[int] = ( '''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate ''' '''library. You can have a look at this example script for pointers: ''' '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' ) def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]: '''simple docstring''' warnings.warn(a__ , a__ ) requires_backends(a__ , 'sklearn' ) return (preds == labels).mean() def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> Tuple: '''simple docstring''' warnings.warn(a__ , a__ ) requires_backends(a__ , 'sklearn' ) lowercase : Tuple = simple_accuracy(a__ , a__ ) lowercase : Tuple = fa_score(y_true=a__ , y_pred=a__ ) return { "acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2, } def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> List[str]: '''simple docstring''' warnings.warn(a__ , a__ ) requires_backends(a__ , 'sklearn' ) lowercase : str = pearsonr(a__ , a__ )[0] lowercase : List[str] = spearmanr(a__ , a__ )[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Dict: '''simple docstring''' warnings.warn(a__ , a__ ) requires_backends(a__ , 'sklearn' ) assert len(a__ ) == len(a__ ), f'''Predictions and labels have mismatched lengths {len(a__ )} and {len(a__ )}''' if task_name == "cola": return {"mcc": matthews_corrcoef(a__ , a__ )} elif task_name == "sst-2": return {"acc": simple_accuracy(a__ , a__ )} elif task_name == "mrpc": return acc_and_fa(a__ , a__ ) elif task_name == "sts-b": return pearson_and_spearman(a__ , a__ ) elif task_name == "qqp": return acc_and_fa(a__ , a__ ) elif task_name == "mnli": return {"mnli/acc": simple_accuracy(a__ , a__ )} elif task_name == "mnli-mm": return {"mnli-mm/acc": simple_accuracy(a__ , a__ )} elif task_name == "qnli": return {"acc": simple_accuracy(a__ , a__ )} elif task_name == "rte": return {"acc": simple_accuracy(a__ , a__ )} elif task_name == "wnli": return {"acc": simple_accuracy(a__ , a__ )} elif task_name == "hans": return {"acc": simple_accuracy(a__ , a__ )} else: raise KeyError(a__ ) def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple: '''simple docstring''' warnings.warn(a__ , a__ ) requires_backends(a__ , 'sklearn' ) if len(a__ ) != len(a__ ): raise ValueError(f'''Predictions and labels have mismatched lengths {len(a__ )} and {len(a__ )}''' ) if task_name == "xnli": return {"acc": simple_accuracy(a__ , a__ )} else: raise KeyError(a__ )
255
import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class a_ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = (EulerDiscreteScheduler,) __SCREAMING_SNAKE_CASE : Optional[int] = 10 def __lowerCAmelCase ( self , **_lowerCamelCase ) ->Tuple: SCREAMING_SNAKE_CASE : Optional[int] = { '''num_train_timesteps''': 1100, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', } config.update(**_lowerCamelCase ) return config def __lowerCAmelCase ( self ) ->Tuple: for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_lowerCamelCase ) def __lowerCAmelCase ( self ) ->Any: for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ): self.check_over_configs(beta_start=_lowerCamelCase , beta_end=_lowerCamelCase ) def __lowerCAmelCase ( self ) ->int: for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_lowerCamelCase ) def __lowerCAmelCase ( self ) ->Optional[Any]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_lowerCamelCase ) def __lowerCAmelCase ( self ) ->List[Any]: SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : int = self.get_scheduler_config() SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**_lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = self.dummy_model() SCREAMING_SNAKE_CASE : int = self.dummy_sample_deter * scheduler.init_noise_sigma SCREAMING_SNAKE_CASE : Any = sample.to(_lowerCamelCase ) for i, t in enumerate(scheduler.timesteps ): SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : List[str] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = output.prev_sample SCREAMING_SNAKE_CASE : List[Any] = torch.sum(torch.abs(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : List[Any] = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2 assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3 def __lowerCAmelCase ( self ) ->List[str]: SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config(prediction_type='''v_prediction''' ) SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**_lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : int = self.dummy_model() SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma SCREAMING_SNAKE_CASE : List[str] = sample.to(_lowerCamelCase ) for i, t in enumerate(scheduler.timesteps ): SCREAMING_SNAKE_CASE : str = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = output.prev_sample SCREAMING_SNAKE_CASE : str = torch.sum(torch.abs(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : int = torch.mean(torch.abs(_lowerCamelCase ) ) assert 
abs(result_sum.item() - 0.0_0_0_2 ) < 1e-2 assert abs(result_mean.item() - 2.2676e-06 ) < 1e-3 def __lowerCAmelCase ( self ) ->Tuple: SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config() SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**_lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps , device=_lowerCamelCase ) SCREAMING_SNAKE_CASE : int = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_model() SCREAMING_SNAKE_CASE : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(_lowerCamelCase ) for t in scheduler.timesteps: SCREAMING_SNAKE_CASE : Dict = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = output.prev_sample SCREAMING_SNAKE_CASE : List[Any] = torch.sum(torch.abs(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : str = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2 assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3 def __lowerCAmelCase ( self ) ->Optional[int]: SCREAMING_SNAKE_CASE : Dict = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**_lowerCamelCase , use_karras_sigmas=_lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps , device=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_model() SCREAMING_SNAKE_CASE : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() SCREAMING_SNAKE_CASE : int = sample.to(_lowerCamelCase ) for t in scheduler.timesteps: SCREAMING_SNAKE_CASE : List[Any] = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Dict = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ) SCREAMING_SNAKE_CASE : int = output.prev_sample SCREAMING_SNAKE_CASE : Optional[Any] = torch.sum(torch.abs(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : Any = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9 ) < 1e-2 assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3 ) < 1e-3
313
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _UpperCamelCase : List[Any] = logging.get_logger(__name__) _UpperCamelCase : List[Any] = { '''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''', } class a ( a__, a__ ): UpperCAmelCase_ : Any ='bit' UpperCAmelCase_ : Tuple =['preactivation', 'bottleneck'] UpperCAmelCase_ : Tuple =['SAME', 'VALID'] def __init__( self , _lowerCamelCase=3 , _lowerCamelCase=6_4 , _lowerCamelCase=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , _lowerCamelCase=[3, 4, 6, 3] , _lowerCamelCase="preactivation" , _lowerCamelCase="relu" , _lowerCamelCase=None , _lowerCamelCase=3_2 , _lowerCamelCase=0.0 , _lowerCamelCase=False , _lowerCamelCase=3_2 , _lowerCamelCase=1 , _lowerCamelCase=None , _lowerCamelCase=None , **_lowerCamelCase , ): super().__init__(**_lowerCamelCase ) if layer_type not in self.layer_types: raise ValueError(F'layer_type={layer_type} is not one of {",".join(self.layer_types )}' ) if global_padding is not None: if global_padding.upper() in self.supported_padding: lowercase = global_padding.upper() else: raise ValueError(F'Padding strategy {global_padding} not supported' ) lowercase = num_channels lowercase = embedding_size lowercase = hidden_sizes lowercase = depths lowercase = layer_type lowercase = hidden_act lowercase = global_padding lowercase = num_groups lowercase = drop_path_rate lowercase = embedding_dynamic_padding lowercase = output_stride lowercase = width_factor lowercase = ['''stem'''] + [F'stage{idx}' for idx in range(1 , len(_lowerCamelCase ) + 1 )] lowercase = get_aligned_output_features_output_indices( out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names )
220
import json
from typing import Dict, List, Optional, Tuple, Union

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
    },
    '''merges_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
    },
    '''tokenizer_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''allenai/led-base-16384''': 16_384,
}


class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('''add_prefix_space''', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('''type'''))
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = '''post_processor'''
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['''sep'''] = tuple(state['''sep'''])
            if "cls" in state:
                state['''cls'''] = tuple(state['''cls'''])

            changes_to_apply = False

            if state.get('''add_prefix_space''', add_prefix_space) != add_prefix_space:
                state['''add_prefix_space'''] = add_prefix_space
                changes_to_apply = True

            if state.get('''trim_offsets''', trim_offsets) != trim_offsets:
                state['''trim_offsets'''] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop('''type'''))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('''Using mask_token, but it is not set yet.''')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('''is_split_into_words''', False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                '''to use it with pretokenized inputs.'''
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('''is_split_into_words''', False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                '''to use it with pretokenized inputs.'''
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['''global_attention_mask''']) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs['''global_attention_mask'''])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['''global_attention_mask'''] = (
                        encoded_inputs['''global_attention_mask'''] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['''global_attention_mask'''] = [-1] * difference + encoded_inputs[
                        '''global_attention_mask'''
                    ]
                else:
                    raise ValueError('''Invalid padding strategy:''' + str(self.padding_side))

        return encoded_inputs
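A minimal, self-contained sketch of the padding rule implemented by the `_pad` override above: `global_attention_mask` is extended with `-1`, not `0`, because `0` already means "local attention". The helper name and signature below are illustrative, not part of transformers.

def pad_global_attention_mask(global_mask, target_length, padding_side="right"):
    # Hypothetical helper mirroring LEDTokenizerFast._pad for one sequence.
    difference = target_length - len(global_mask)
    if difference <= 0:
        return global_mask
    if padding_side == "right":
        return global_mask + [-1] * difference
    return [-1] * difference + global_mask


assert pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]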
313
0
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that, temporarily, `from diffusers.pipelines import DiffusionPipeline` works

from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate


deprecate(
    '''pipelines_utils''',
    '''0.22.0''',
    '''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
    standard_warn=False,
    stacklevel=3,
)
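For context, a generic sketch of the re-export-plus-warning pattern this shim relies on. This is an assumption about the mechanism, not the actual `deprecate` helper from diffusers.utils.

import warnings


def warn_deprecated_import(old_path: str, new_path: str, removed_in: str) -> None:
    # Hypothetical stand-in for the `deprecate` call above: emit a FutureWarning
    # at import time while the old module keeps re-exporting the public names.
    warnings.warn(
        f"Importing from `{old_path}` is deprecated and will be removed in {removed_in}. "
        f"Please import from `{new_path}` instead.",
        FutureWarning,
        stacklevel=3,  # attribute the warning to the user's import site
    )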
37
from __future__ import annotations

import math


def default_matrix_multiplication(a: list, b: list) -> list:
    # Multiplication only for 2x2 matrices
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception('''Matrices are not 2x2''')
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception('''Odd matrices are not supported!''')

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]

    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print('''\n'''.join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            '''Unable to multiply these matrices, please check the dimensions.\n'''
            F"""Matrix A: {matrix1}\n"""
            F"""Matrix B: {matrix2}"""
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension2[1] and dimension2[0] == dimension1[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
313
0
import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class A__ : def __init__( self : Any , a : str , a : Dict=100 , a : Any=13 , a : str=30 , a : List[str]=2 , a : Any=3 , a : Union[str, Any]=True , a : List[Any]=True , a : str=32 , a : Any=4 , a : List[Any]=4 , a : Union[str, Any]=37 , a : str="gelu" , a : Optional[Any]=0.1 , a : List[str]=0.1 , a : List[str]=10 , a : Union[str, Any]=0.0_2 , a : List[str]=3 , a : List[Any]=None , a : Dict=[0, 1, 2, 3] , ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = parent lowerCAmelCase__ : int = 100 lowerCAmelCase__ : Dict = batch_size lowerCAmelCase__ : Any = image_size lowerCAmelCase__ : Any = patch_size lowerCAmelCase__ : int = num_channels lowerCAmelCase__ : Optional[Any] = is_training lowerCAmelCase__ : Optional[int] = use_labels lowerCAmelCase__ : Any = hidden_size lowerCAmelCase__ : Union[str, Any] = num_hidden_layers lowerCAmelCase__ : int = num_attention_heads lowerCAmelCase__ : Union[str, Any] = intermediate_size lowerCAmelCase__ : List[Any] = hidden_act lowerCAmelCase__ : Tuple = hidden_dropout_prob lowerCAmelCase__ : Optional[Any] = attention_probs_dropout_prob lowerCAmelCase__ : Union[str, Any] = type_sequence_label_size lowerCAmelCase__ : Any = initializer_range lowerCAmelCase__ : Any = scope lowerCAmelCase__ : Any = out_indices lowerCAmelCase__ : List[str] = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCAmelCase__ : Any = (image_size // patch_size) ** 2 lowerCAmelCase__ : Optional[Any] = num_patches + 1 def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase__ : List[str] = None lowerCAmelCase__ : Tuple = None if self.use_labels: lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ : int = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) lowerCAmelCase__ : Dict = self.get_config() return config, pixel_values, labels, pixel_labels def _lowerCamelCase ( self : int ): '''simple docstring''' return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob 
, is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def _lowerCamelCase ( self : str , a : List[Any] , a : Tuple , a : Dict , a : Any ): '''simple docstring''' lowerCAmelCase__ : List[Any] = BeitModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() lowerCAmelCase__ : Tuple = model(_lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCamelCase ( self : List[str] , a : Optional[Any] , a : str , a : List[str] , a : Dict ): '''simple docstring''' lowerCAmelCase__ : List[Any] = BeitForMaskedImageModeling(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() lowerCAmelCase__ : int = model(_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def _lowerCamelCase ( self : str , a : str , a : List[str] , a : str , a : Any ): '''simple docstring''' lowerCAmelCase__ : List[str] = self.type_sequence_label_size lowerCAmelCase__ : List[Any] = BeitForImageClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() lowerCAmelCase__ : Optional[Any] = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCAmelCase__ : Dict = 1 lowerCAmelCase__ : Union[str, Any] = BeitForImageClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() lowerCAmelCase__ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase__ : str = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _lowerCamelCase ( self : Optional[Any] , a : int , a : Union[str, Any] , a : Any , a : Dict ): '''simple docstring''' lowerCAmelCase__ : Any = self.num_labels lowerCAmelCase__ : Union[str, Any] = BeitForSemanticSegmentation(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() lowerCAmelCase__ : Any = model(_lowerCamelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) lowerCAmelCase__ : Union[str, Any] = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' lowerCAmelCase__ : Any = self.prepare_config_and_inputs() lowerCAmelCase__ : int = config_and_inputs lowerCAmelCase__ : Union[str, Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class A__ ( a__ , a__ , unittest.TestCase ): lowercase = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) lowercase = ( { 'feature-extraction': BeitModel, 'image-classification': BeitForImageClassification, 'image-segmentation': BeitForSemanticSegmentation, } if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False def _lowerCamelCase ( self : Dict ): '''simple docstring''' lowerCAmelCase__ : List[Any] = BeitModelTester(self ) lowerCAmelCase__ : Tuple = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 ) def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='BEiT does 
not use inputs_embeds' ) def _lowerCamelCase ( self : Any ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def _lowerCamelCase ( self : str ): '''simple docstring''' pass def _lowerCamelCase ( self : str ): '''simple docstring''' lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : str = model_class(_lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCAmelCase__ : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) ) def _lowerCamelCase ( self : str ): '''simple docstring''' lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : List[Any] = model_class(_lowerCamelCase ) lowerCAmelCase__ : Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase__ : Optional[int] = [*signature.parameters.keys()] lowerCAmelCase__ : Any = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def _lowerCamelCase ( self : int ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_lowerCamelCase ) def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCamelCase ) def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' if not self.model_tester.is_training: return lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ : Any = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(_lowerCamelCase ), BeitForMaskedImageModeling]: continue lowerCAmelCase__ : Any = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.train() lowerCAmelCase__ : Optional[int] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase ) lowerCAmelCase__ : List[Any] = model(**_lowerCamelCase ).loss loss.backward() def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return lowerCAmelCase__ : Any = False lowerCAmelCase__ : int = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(_lowerCamelCase ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue lowerCAmelCase__ : Union[str, Any] = model_class(_lowerCamelCase ) model.gradient_checkpointing_enable() model.to(_lowerCamelCase ) model.train() lowerCAmelCase__ : int = self._prepare_for_class(_lowerCamelCase , 
_lowerCamelCase , return_labels=_lowerCamelCase ) lowerCAmelCase__ : Any = model(**_lowerCamelCase ).loss loss.backward() def _lowerCamelCase ( self : Tuple ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ : List[Any] = _config_zero_init(_lowerCamelCase ) for model_class in self.all_model_classes: lowerCAmelCase__ : Tuple = model_class(config=_lowerCamelCase ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @slow def _lowerCamelCase ( self : List[str] ): '''simple docstring''' for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ : Any = BeitModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def lowerCAmelCase__ ( ) -> List[Any]: lowerCAmelCase__ : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class A__ ( unittest.TestCase ): @cached_property def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None @slow def _lowerCamelCase ( self : Any ): '''simple docstring''' lowerCAmelCase__ : List[Any] = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(_lowerCamelCase ) lowerCAmelCase__ : Any = self.default_image_processor lowerCAmelCase__ : Dict = prepare_img() lowerCAmelCase__ : List[Any] = image_processor(images=_lowerCamelCase , return_tensors='pt' ).pixel_values.to(_lowerCamelCase ) # prepare bool_masked_pos lowerCAmelCase__ : Optional[Any] = torch.ones((1, 196) , dtype=torch.bool ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): lowerCAmelCase__ : List[Any] = model(pixel_values=_lowerCamelCase , bool_masked_pos=_lowerCamelCase ) lowerCAmelCase__ : int = outputs.logits # verify the logits lowerCAmelCase__ : Optional[Any] = torch.Size((1, 196, 8_192) ) self.assertEqual(logits.shape , _lowerCamelCase ) lowerCAmelCase__ : str = torch.tensor( [[-3.2_4_3_7, 0.5_0_7_2, -13.9_174], [-3.2_4_5_6, 0.4_9_4_8, -13.9_401], [-3.2_0_3_3, 0.5_1_2_1, -13.8_550]] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , _lowerCamelCase , atol=1E-2 ) ) @slow def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' lowerCAmelCase__ : Any = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(_lowerCamelCase ) lowerCAmelCase__ : Dict = self.default_image_processor lowerCAmelCase__ : int = prepare_img() lowerCAmelCase__ : List[Any] = image_processor(images=_lowerCamelCase , return_tensors='pt' ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): lowerCAmelCase__ : Any = model(**_lowerCamelCase ) lowerCAmelCase__ : List[Any] = outputs.logits # verify the logits lowerCAmelCase__ : int = torch.Size((1, 1_000) ) self.assertEqual(logits.shape , _lowerCamelCase ) lowerCAmelCase__ : Optional[Any] = torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(logits[0, :3] , _lowerCamelCase , atol=1E-4 ) ) lowerCAmelCase__ : Optional[int] = 281 self.assertEqual(logits.argmax(-1 ).item() , _lowerCamelCase ) @slow 
def _lowerCamelCase ( self : Any ): '''simple docstring''' lowerCAmelCase__ : Any = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to( _lowerCamelCase ) lowerCAmelCase__ : Any = self.default_image_processor lowerCAmelCase__ : Dict = prepare_img() lowerCAmelCase__ : Dict = image_processor(images=_lowerCamelCase , return_tensors='pt' ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): lowerCAmelCase__ : int = model(**_lowerCamelCase ) lowerCAmelCase__ : List[str] = outputs.logits # verify the logits lowerCAmelCase__ : List[str] = torch.Size((1, 21_841) ) self.assertEqual(logits.shape , _lowerCamelCase ) lowerCAmelCase__ : List[str] = torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(logits[0, :3] , _lowerCamelCase , atol=1E-4 ) ) lowerCAmelCase__ : Optional[int] = 2_396 self.assertEqual(logits.argmax(-1 ).item() , _lowerCamelCase ) @slow def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : int = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' ) lowerCAmelCase__ : Any = model.to(_lowerCamelCase ) lowerCAmelCase__ : Any = BeitImageProcessor(do_resize=_lowerCamelCase , size=640 , do_center_crop=_lowerCamelCase ) lowerCAmelCase__ : Optional[int] = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) lowerCAmelCase__ : Union[str, Any] = Image.open(ds[0]['file'] ) lowerCAmelCase__ : Any = image_processor(images=_lowerCamelCase , return_tensors='pt' ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): lowerCAmelCase__ : Union[str, Any] = model(**_lowerCamelCase ) lowerCAmelCase__ : Any = outputs.logits # verify the logits lowerCAmelCase__ : Tuple = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape , _lowerCamelCase ) lowerCAmelCase__ : Optional[Any] = version.parse(PIL.__version__ ) < version.parse('9.0.0' ) if is_pillow_less_than_a: lowerCAmelCase__ : List[str] = torch.tensor( [ [[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]], [[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]], [[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]], ] , device=_lowerCamelCase , ) else: lowerCAmelCase__ : Optional[int] = torch.tensor( [ [[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]], [[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]], [[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]], ] , device=_lowerCamelCase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _lowerCamelCase , atol=1E-4 ) ) @slow def _lowerCamelCase ( self : Dict ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' ) lowerCAmelCase__ : Dict = model.to(_lowerCamelCase ) lowerCAmelCase__ : Optional[Any] = BeitImageProcessor(do_resize=_lowerCamelCase , size=640 , do_center_crop=_lowerCamelCase ) lowerCAmelCase__ : Optional[int] = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) lowerCAmelCase__ : str = Image.open(ds[0]['file'] ) lowerCAmelCase__ : Any = image_processor(images=_lowerCamelCase , return_tensors='pt' ).to(_lowerCamelCase ) # forward pass 
with torch.no_grad(): lowerCAmelCase__ : List[Any] = model(**_lowerCamelCase ) lowerCAmelCase__ : str = outputs.logits.detach().cpu() lowerCAmelCase__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=_lowerCamelCase , target_sizes=[(500, 300)] ) lowerCAmelCase__ : Union[str, Any] = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , _lowerCamelCase ) lowerCAmelCase__ : str = image_processor.post_process_semantic_segmentation(outputs=_lowerCamelCase ) lowerCAmelCase__ : List[str] = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape , _lowerCamelCase )
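To make the shape bookkeeping in the tester above concrete: the sequence length is the number of non-overlapping patches plus one [CLS] token. A small sketch of that arithmetic; the 224/16 case is a typical BEiT-base setup (assumed, not taken from this file).

def beit_sequence_length(image_size: int, patch_size: int) -> int:
    # number of patches plus one [CLS] token, matching the comment in the tester above
    num_patches = (image_size // patch_size) ** 2
    return num_patches + 1


assert beit_sequence_length(30, 2) == 226    # the tester defaults above
assert beit_sequence_length(224, 16) == 197  # assumed BEiT-base configuration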
212
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState


warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')


class AcceleratedScheduler:
    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, '''total_steps'''):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
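An illustrative training-loop pattern the wrapper above automates: the LR scheduler is stepped only on iterations where the optimizer actually stepped, i.e. at gradient-accumulation boundaries. The toy model and optimizer below are stand-ins, not accelerate objects.

import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.5)

accumulation_steps = 4
for step in range(8):  # 8 micro-batches -> 2 real optimizer steps
    loss = model(torch.randn(2, 4)).mean() / accumulation_steps
    loss.backward()
    if (step + 1) % accumulation_steps == 0:  # gradients are "synced" here
        optimizer.step()
        optimizer.zero_grad()
        scheduler.step()  # stepped twice overall, as the wrapper would arrange

assert abs(scheduler.get_last_lr()[0] - 0.1 * 0.5**2) < 1e-12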
313
0
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def _A ( _lowerCAmelCase , _lowerCAmelCase=False ): """simple docstring""" __lowercase =[] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""module.blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""module.blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (f"""module.blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""module.blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""module.blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""module.blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""module.blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""module.blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""module.blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""module.blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ('module.cls_token', 'vit.embeddings.cls_token'), ('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'), ('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'), ('module.pos_embed', 'vit.embeddings.position_embeddings'), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('module.norm.weight', 'layernorm.weight'), ('module.norm.bias', 'layernorm.bias'), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" __lowercase =[(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('norm.weight', 'vit.layernorm.weight'), ('norm.bias', 'vit.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ] ) return rename_keys def _A ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ): """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: __lowercase ='''''' else: __lowercase ='''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) __lowercase =state_dict.pop(f"""module.blocks.{i}.attn.qkv.weight""" ) __lowercase =state_dict.pop(f"""module.blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict __lowercase =in_proj_weight[ : config.hidden_size, : ] __lowercase =in_proj_bias[: config.hidden_size] __lowercase =in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] __lowercase =in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] __lowercase =in_proj_weight[ -config.hidden_size :, : ] __lowercase =in_proj_bias[-config.hidden_size :] def _A ( _lowerCAmelCase ): """simple docstring""" __lowercase 
=['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(a__ , a__ ) def _A ( _lowerCAmelCase ): """simple docstring""" __lowercase =[ '''module.fc.fc1.weight''', '''module.fc.fc1.bias''', '''module.fc.bn1.weight''', '''module.fc.bn1.bias''', '''module.fc.bn1.running_mean''', '''module.fc.bn1.running_var''', '''module.fc.bn1.num_batches_tracked''', '''module.fc.fc2.weight''', '''module.fc.fc2.bias''', '''module.fc.bn2.weight''', '''module.fc.bn2.bias''', '''module.fc.bn2.running_mean''', '''module.fc.bn2.running_var''', '''module.fc.bn2.num_batches_tracked''', '''module.fc.fc3.weight''', '''module.fc.fc3.bias''', ] for k in ignore_keys: state_dict.pop(a__ , a__ ) def _A ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): """simple docstring""" __lowercase =dct.pop(a__ ) __lowercase =val def _A ( _lowerCAmelCase , _lowerCAmelCase ): """simple docstring""" __lowercase =ViTMSNConfig() __lowercase =1_000 __lowercase ='''datasets/huggingface/label-files''' __lowercase ='''imagenet-1k-id2label.json''' __lowercase =json.load(open(hf_hub_download(a__ , a__ ) , 'r' ) ) __lowercase ={int(a__ ): v for k, v in idalabel.items()} __lowercase =idalabel __lowercase ={v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: __lowercase =384 __lowercase =1_536 __lowercase =6 elif "l16" in checkpoint_url: __lowercase =1_024 __lowercase =4_096 __lowercase =24 __lowercase =16 __lowercase =0.1 elif "b4" in checkpoint_url: __lowercase =4 elif "l7" in checkpoint_url: __lowercase =7 __lowercase =1_024 __lowercase =4_096 __lowercase =24 __lowercase =16 __lowercase =0.1 __lowercase =ViTMSNModel(a__ ) __lowercase =torch.hub.load_state_dict_from_url(a__ , map_location='cpu' )['''target_encoder'''] __lowercase =ViTImageProcessor(size=config.image_size ) remove_projection_head(a__ ) __lowercase =create_rename_keys(a__ , base_model=a__ ) for src, dest in rename_keys: rename_key(a__ , a__ , a__ ) read_in_q_k_v(a__ , a__ , base_model=a__ ) model.load_state_dict(a__ ) model.eval() __lowercase ='''http://images.cocodataset.org/val2017/000000039769.jpg''' __lowercase =Image.open(requests.get(a__ , stream=a__ ).raw ) __lowercase =ViTImageProcessor( size=config.image_size , image_mean=a__ , image_std=a__ ) __lowercase =image_processor(images=a__ , return_tensors='pt' ) # forward pass torch.manual_seed(2 ) __lowercase =model(**a__ ) __lowercase =outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: __lowercase =torch.tensor([[-1.09_15, -1.48_76, -1.18_09]] ) elif "b16" in checkpoint_url: __lowercase =torch.tensor([[14.28_89, -18.90_45, 11.72_81]] ) elif "l16" in checkpoint_url: __lowercase =torch.tensor([[41.50_28, -22.86_81, 45.64_75]] ) elif "b4" in checkpoint_url: __lowercase =torch.tensor([[-4.38_68, 5.29_32, -0.41_37]] ) else: __lowercase =torch.tensor([[-0.17_92, -0.64_65, 2.42_63]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3] , a__ , atol=1e-4 ) print(f"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(a__ ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(a__ ) if __name__ == "__main__": lowerCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""", type=str, help="""URL of the checkpoint you\'d like to convert.""", ) 
parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) lowerCamelCase = parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
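The q/k/v handling in `read_in_q_k_v` above is a row-wise split of the fused attention projection. A small NumPy sketch of the same slicing, with hypothetical shapes:

import numpy as np

hidden_size = 8
# fused (3h, h) projection holding Q, K, V stacked row-wise, as in timm checkpoints
in_proj_weight = np.arange(3 * hidden_size * hidden_size).reshape(3 * hidden_size, hidden_size)

query = in_proj_weight[:hidden_size, :]
key = in_proj_weight[hidden_size : 2 * hidden_size, :]
value = in_proj_weight[-hidden_size:, :]

assert query.shape == key.shape == value.shape == (hidden_size, hidden_size)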
166
import argparse import os from pathlib import Path from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params a__ : Optional[Any] = [ # replace left string with right string to get the relevant state_dict key (identical state dict to bart) ['''memory_attention''', '''encoder_attn'''], ['''attention''', '''attn'''], ['''/''', '''.'''], ['''.LayerNorm.gamma''', '''_layer_norm.weight'''], ['''.LayerNorm.beta''', '''_layer_norm.bias'''], ['''r.layer_''', '''r.layers.'''], ['''output_proj''', '''out_proj'''], ['''ffn.dense_1.''', '''fc2.'''], ['''ffn.dense.''', '''fc1.'''], ['''ffn_layer_norm''', '''final_layer_norm'''], ['''kernel''', '''weight'''], ['''encoder_layer_norm.''', '''encoder.layer_norm.'''], ['''decoder_layer_norm.''', '''decoder.layer_norm.'''], ['''embeddings.weights''', '''shared.weight'''], ] def UpperCAmelCase_( a__ ): """simple docstring""" for pegasus_name, hf_name in PATTERNS: SCREAMING_SNAKE_CASE : Union[str, Any] = k.replace(a__ , a__ ) return k def UpperCAmelCase_( a__ , a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = DEFAULTS.copy() cfg_kwargs.update(a__ ) SCREAMING_SNAKE_CASE : Union[str, Any] = PegasusConfig(**a__ ) SCREAMING_SNAKE_CASE : Optional[int] = PegasusForConditionalGeneration(a__ ) SCREAMING_SNAKE_CASE : Dict = torch_model.model.state_dict() SCREAMING_SNAKE_CASE : List[str] = {} for k, v in tf_weights.items(): SCREAMING_SNAKE_CASE : int = rename_state_dict_key(a__ ) if new_k not in sd: raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" ) if "dense" in k or "proj" in new_k: SCREAMING_SNAKE_CASE : Dict = v.T SCREAMING_SNAKE_CASE : Tuple = torch.tensor(a__ , dtype=sd[new_k].dtype ) assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}""" # make sure embedding.padding_idx is respected SCREAMING_SNAKE_CASE : Tuple = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] ) SCREAMING_SNAKE_CASE : int = mapping['''shared.weight'''] SCREAMING_SNAKE_CASE : Union[str, Any] = mapping['''shared.weight'''] SCREAMING_SNAKE_CASE : Optional[Any] = {k: torch.zeros_like(a__ ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping} mapping.update(**a__ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = torch_model.model.load_state_dict(a__ , strict=a__ ) SCREAMING_SNAKE_CASE : Optional[Any] = [ k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight'''] ] assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], F"""no matches found for the following tf keys {extra}""" return torch_model def UpperCAmelCase_( a__="./ckpt/aeslc/model.ckpt-32000" ): """simple docstring""" SCREAMING_SNAKE_CASE : str = tf.train.list_variables(a__ ) SCREAMING_SNAKE_CASE : str = {} SCREAMING_SNAKE_CASE : List[Any] = ['''Adafactor''', '''global_step'''] for name, shape in tqdm(a__ , desc='''converting tf checkpoint to dict''' ): SCREAMING_SNAKE_CASE : Union[str, Any] = any(pat in name for pat in ignore_name ) if skip_key: continue SCREAMING_SNAKE_CASE : Dict = tf.train.load_variable(a__ , a__ ) SCREAMING_SNAKE_CASE : Any = array return tf_weights def UpperCAmelCase_( a__ , a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = Path(a__ ).parent.name 
SCREAMING_SNAKE_CASE : Union[str, Any] = task_specific_params[F"""summarization_{dataset}"""]['''max_position_embeddings'''] SCREAMING_SNAKE_CASE : Dict = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' , model_max_length=a__ ) assert tok.model_max_length == desired_max_model_length tok.save_pretrained(a__ ) # convert model SCREAMING_SNAKE_CASE : Any = get_tf_weights_as_numpy(a__ ) SCREAMING_SNAKE_CASE : List[str] = task_specific_params[F"""summarization_{dataset}"""] if dataset == "large": SCREAMING_SNAKE_CASE : int = task_specific_params SCREAMING_SNAKE_CASE : List[str] = convert_pegasus(a__ , a__ ) torch_model.save_pretrained(a__ ) SCREAMING_SNAKE_CASE : Union[str, Any] = torch_model.state_dict() sd.pop('''model.decoder.embed_positions.weight''' ) sd.pop('''model.encoder.embed_positions.weight''' ) torch.save(a__ , Path(a__ ) / '''pytorch_model.bin''' ) if __name__ == "__main__": a__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''') parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''') a__ : List[str] = parser.parse_args() if args.save_dir is None: a__ : Any = Path(args.tf_ckpt_path).parent.name a__ : int = os.path.join('''pegasus''', dataset) convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
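A quick illustration of how the `PATTERNS` table drives `rename_state_dict_key` above: each (pegasus_name, hf_name) pair is applied with str.replace in order. The key below is hypothetical, and only a subset of the rules is shown.

patterns = [["memory_attention", "encoder_attn"], ["attention", "attn"], ["/", "."], ["kernel", "weight"]]
key = "decoder/memory_attention/output_proj/kernel"
for old, new in patterns:
    key = key.replace(old, new)
print(key)  # decoder.encoder_attn.output_proj.weight (with the subset of rules shown)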
313
0
"""simple docstring""" from random import shuffle import tensorflow as tf from numpy import array def _snake_case ( lowercase__ , lowercase__ ): _lowerCamelCase : List[Any] = int(a__ ) assert noofclusters < len(a__ ) # Find out the dimensionality _lowerCamelCase : Dict = len(vectors[0] ) # Will help select random centroids from among the available vectors _lowerCamelCase : Any = list(range(len(a__ ) ) ) shuffle(a__ ) # GRAPH OF COMPUTATION # We initialize a new graph and set it as the default during each run # of this algorithm. This ensures that as this function is called # multiple times, the default graph doesn't keep getting crowded with # unused ops and Variables from previous function calls. _lowerCamelCase : Union[str, Any] = tf.Graph() with graph.as_default(): # SESSION OF COMPUTATION _lowerCamelCase : Optional[Any] = tf.Session() ##CONSTRUCTING THE ELEMENTS OF COMPUTATION ##First lets ensure we have a Variable vector for each centroid, ##initialized to one of the vectors from the available data points _lowerCamelCase : Union[str, Any] = [ tf.Variable(vectors[vector_indices[i]] ) for i in range(a__ ) ] ##These nodes will assign the centroid Variables the appropriate ##values _lowerCamelCase : str = tf.placeholder('float64' , [dim] ) _lowerCamelCase : Dict = [] for centroid in centroids: cent_assigns.append(tf.assign(a__ , a__ ) ) ##Variables for cluster assignments of individual vectors(initialized ##to 0 at first) _lowerCamelCase : int = [tf.Variable(0 ) for i in range(len(a__ ) )] ##These nodes will assign an assignment Variable the appropriate ##value _lowerCamelCase : str = tf.placeholder('int32' ) _lowerCamelCase : int = [] for assignment in assignments: cluster_assigns.append(tf.assign(a__ , a__ ) ) ##Now lets construct the node that will compute the mean # The placeholder for the input _lowerCamelCase : Optional[int] = tf.placeholder('float' , [None, dim] ) # The Node/op takes the input and computes a mean along the 0th # dimension, i.e. the list of input vectors _lowerCamelCase : int = tf.reduce_mean(a__ , 0 ) ##Node for computing Euclidean distances # Placeholders for input _lowerCamelCase : Union[str, Any] = tf.placeholder('float' , [dim] ) _lowerCamelCase : str = tf.placeholder('float' , [dim] ) _lowerCamelCase : Union[str, Any] = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(a__ , a__ ) , 2 ) ) ) ##This node will figure out which cluster to assign a vector to, ##based on Euclidean distances of the vector from the centroids. # Placeholder for input _lowerCamelCase : int = tf.placeholder('float' , [noofclusters] ) _lowerCamelCase : Union[str, Any] = tf.argmin(a__ , 0 ) ##INITIALIZING STATE VARIABLES ##This will help initialization of all Variables defined with respect ##to the graph. The Variable-initializer should be defined after ##all the Variables have been constructed, so that each of them ##will be included in the initialization. _lowerCamelCase : int = tf.initialize_all_variables() # Initialize all variables sess.run(a__ ) ##CLUSTERING ITERATIONS # Now perform the Expectation-Maximization steps of K-Means clustering # iterations. To keep things simple, we will only do a set number of # iterations, instead of using a Stopping Criterion. _lowerCamelCase : Tuple = 100 for _ in range(a__ ): ##EXPECTATION STEP ##Based on the centroid locations till last iteration, compute ##the _expected_ centroid assignments. 
# Iterate over each vector for vector_n in range(len(a__ ) ): _lowerCamelCase : Union[str, Any] = vectors[vector_n] # Compute Euclidean distance between this vector and each # centroid. Remember that this list cannot be named #'centroid_distances', since that is the input to the # cluster assignment node. _lowerCamelCase : Optional[int] = [ sess.run(a__ , feed_dict={va: vect, va: sess.run(a__ )} ) for centroid in centroids ] # Now use the cluster assignment node, with the distances # as the input _lowerCamelCase : Optional[int] = sess.run( a__ , feed_dict={centroid_distances: distances} ) # Now assign the value to the appropriate state variable sess.run( cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} ) ##MAXIMIZATION STEP # Based on the expected state computed from the Expectation Step, # compute the locations of the centroids so as to maximize the # overall objective of minimizing within-cluster Sum-of-Squares for cluster_n in range(a__ ): # Collect all the vectors assigned to this cluster _lowerCamelCase : str = [ vectors[i] for i in range(len(a__ ) ) if sess.run(assignments[i] ) == cluster_n ] # Compute new centroid location _lowerCamelCase : List[Any] = sess.run( a__ , feed_dict={mean_input: array(a__ )} ) # Assign value to appropriate variable sess.run( cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} ) # Return centroids and assignments _lowerCamelCase : Optional[Any] = sess.run(a__ ) _lowerCamelCase : Optional[int] = sess.run(a__ ) return centroids, assignments
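The function above expresses Lloyd's algorithm through TF1 session machinery (`tf.Session`, `tf.placeholder`, which no longer exist in TF2's default mode). For reference, a compact NumPy sketch of the same expectation/maximization loop; this is an illustrative re-expression, not equivalent code.

import numpy as np


def kmeans_numpy(vectors: np.ndarray, k: int, iterations: int = 100, seed: int = 0):
    # E-step: assign each vector to its nearest centroid; M-step: re-average.
    rng = np.random.default_rng(seed)
    centroids = vectors[rng.choice(len(vectors), size=k, replace=False)]
    assignments = np.zeros(len(vectors), dtype=int)
    for _ in range(iterations):
        distances = np.linalg.norm(vectors[:, None, :] - centroids[None, :, :], axis=-1)
        assignments = distances.argmin(axis=1)
        for cluster in range(k):
            members = vectors[assignments == cluster]
            if len(members):
                centroids[cluster] = members.mean(axis=0)
    return centroids, assignments


points = np.array([[0.0, 0.0], [0.1, 0.2], [5.0, 5.0], [5.2, 4.9]])
centers, labels = kmeans_numpy(points, k=2)
assert labels[0] == labels[1] and labels[2] == labels[3]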
96
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class a_ ( a__ , unittest.TestCase ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = KandinskyImgaImgPipeline __SCREAMING_SNAKE_CASE : str = ['prompt', 'image_embeds', 'negative_image_embeds', 'image'] __SCREAMING_SNAKE_CASE : int = [ 'prompt', 'negative_prompt', 'image_embeds', 'negative_image_embeds', 'image', ] __SCREAMING_SNAKE_CASE : int = [ 'generator', 'height', 'width', 'strength', 'guidance_scale', 'negative_prompt', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] __SCREAMING_SNAKE_CASE : List[Any] = False @property def __lowerCAmelCase ( self ) ->int: return 32 @property def __lowerCAmelCase ( self ) ->List[str]: return 32 @property def __lowerCAmelCase ( self ) ->Optional[int]: return self.time_input_dim @property def __lowerCAmelCase ( self ) ->Tuple: return self.time_input_dim * 4 @property def __lowerCAmelCase ( self ) ->Optional[int]: return 100 @property def __lowerCAmelCase ( self ) ->Dict: SCREAMING_SNAKE_CASE : str = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' ) return tokenizer @property def __lowerCAmelCase ( self ) ->Tuple: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : List[Any] = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) SCREAMING_SNAKE_CASE : Dict = MultilingualCLIP(_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = text_encoder.eval() return text_encoder @property def __lowerCAmelCase ( self ) ->Union[str, Any]: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = { '''in_channels''': 4, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''text_image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''text_image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } SCREAMING_SNAKE_CASE : Tuple = UNetaDConditionModel(**_lowerCamelCase ) return model @property def __lowerCAmelCase ( self ) ->List[str]: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ 
"AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __lowerCAmelCase ( self ) ->Optional[Any]: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : List[str] = VQModel(**self.dummy_movq_kwargs ) return model def __lowerCAmelCase ( self ) ->Dict: SCREAMING_SNAKE_CASE : Any = self.dummy_text_encoder SCREAMING_SNAKE_CASE : Any = self.dummy_tokenizer SCREAMING_SNAKE_CASE : List[Any] = self.dummy_unet SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_movq SCREAMING_SNAKE_CASE : Optional[Any] = { '''num_train_timesteps''': 1000, '''beta_schedule''': '''linear''', '''beta_start''': 0.0_0_0_8_5, '''beta_end''': 0.0_1_2, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', '''thresholding''': False, } SCREAMING_SNAKE_CASE : Optional[Any] = DDIMScheduler(**_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = { '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->str: SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) SCREAMING_SNAKE_CASE : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowerCamelCase ) # create init_image SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE : str = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) ) if str(_lowerCamelCase ).startswith('''mps''' ): SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(_lowerCamelCase ) else: SCREAMING_SNAKE_CASE : str = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Any = { '''prompt''': '''horse''', '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def __lowerCAmelCase ( self ) ->Dict: SCREAMING_SNAKE_CASE : str = '''cpu''' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : List[str] = self.pipeline_class(**_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : Dict = output.images SCREAMING_SNAKE_CASE : Any = pipe( **self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0] SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) SCREAMING_SNAKE_CASE : Optional[int] = np.array( [0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got 
{image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class a_ ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self ) ->List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) ->Optional[int]: SCREAMING_SNAKE_CASE : int = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/kandinsky_img2img_frog.npy''' ) SCREAMING_SNAKE_CASE : int = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) SCREAMING_SNAKE_CASE : str = '''A red cartoon frog, 4k''' SCREAMING_SNAKE_CASE : Any = KandinskyPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[str] = KandinskyImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : str = pipeline.to(_lowerCamelCase ) pipeline.set_progress_bar_config(disable=_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_prior( _lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() SCREAMING_SNAKE_CASE : Dict = pipeline( _lowerCamelCase , image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , ) SCREAMING_SNAKE_CASE : Tuple = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
313
0
from __future__ import annotations


class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        """Visual representation of the node and all its following nodes."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f'''{temp.data}''')
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list):
    """Creates a Linked List from the elements of the given sequence and
    returns the head of the Linked List."""
    if not elements_list:
        raise Exception("""The Elements List is empty""")

    head = Node(elements_list[0])
    current = head
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node):
    """Prints the elements of the given Linked List in reverse order."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("""Linked List:""")
    print(linked_list)
    print("""Elements in Reverse:""")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
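The recursion in `print_reverse` defers each `print` until the rest of the list has been printed, which reverses the output order. A quick check with a hypothetical helper that collects instead of printing:

def collect_reverse(node, out):
    # Same shape as print_reverse above, but appends instead of printing,
    # so the behavior is easy to assert.
    if node is not None and isinstance(node, Node):
        collect_reverse(node.next, out)
        out.append(node.data)
    return out


assert collect_reverse(make_linked_list([1, 2, 3]), []) == [3, 2, 1]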
346
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def UpperCAmelCase_( a__ , a__=False ): """simple docstring""" SCREAMING_SNAKE_CASE : int = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ('''module.cls_token''', '''vit.embeddings.cls_token'''), ('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''), ('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''), ('''module.pos_embed''', '''vit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''module.norm.weight''', '''layernorm.weight'''), ('''module.norm.bias''', '''layernorm.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" SCREAMING_SNAKE_CASE : Any = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def UpperCAmelCase_( a__ , a__ , a__=False ): """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: SCREAMING_SNAKE_CASE : Any = '''''' else: SCREAMING_SNAKE_CASE : Optional[int] = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) SCREAMING_SNAKE_CASE : Optional[Any] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" ) SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE : List[str] = in_proj_weight[ : config.hidden_size, : ] SCREAMING_SNAKE_CASE : Any = in_proj_bias[: config.hidden_size] SCREAMING_SNAKE_CASE : int = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] SCREAMING_SNAKE_CASE : List[Any] = in_proj_bias[ 
config.hidden_size : config.hidden_size * 2 ] SCREAMING_SNAKE_CASE : List[str] = in_proj_weight[ -config.hidden_size :, : ] SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias[-config.hidden_size :] def UpperCAmelCase_( a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(a__ , a__ ) def UpperCAmelCase_( a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = [ '''module.fc.fc1.weight''', '''module.fc.fc1.bias''', '''module.fc.bn1.weight''', '''module.fc.bn1.bias''', '''module.fc.bn1.running_mean''', '''module.fc.bn1.running_var''', '''module.fc.bn1.num_batches_tracked''', '''module.fc.fc2.weight''', '''module.fc.fc2.bias''', '''module.fc.bn2.weight''', '''module.fc.bn2.bias''', '''module.fc.bn2.running_mean''', '''module.fc.bn2.running_var''', '''module.fc.bn2.num_batches_tracked''', '''module.fc.fc3.weight''', '''module.fc.fc3.bias''', ] for k in ignore_keys: state_dict.pop(a__ , a__ ) def UpperCAmelCase_( a__ , a__ , a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : int = dct.pop(a__ ) SCREAMING_SNAKE_CASE : Optional[Any] = val def UpperCAmelCase_( a__ , a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = ViTMSNConfig() SCREAMING_SNAKE_CASE : Optional[int] = 1_000 SCREAMING_SNAKE_CASE : str = '''datasets/huggingface/label-files''' SCREAMING_SNAKE_CASE : List[str] = '''imagenet-1k-id2label.json''' SCREAMING_SNAKE_CASE : Union[str, Any] = json.load(open(hf_hub_download(a__ , a__ ) , '''r''' ) ) SCREAMING_SNAKE_CASE : List[Any] = {int(a__ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : str = idalabel SCREAMING_SNAKE_CASE : int = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: SCREAMING_SNAKE_CASE : Tuple = 384 SCREAMING_SNAKE_CASE : Any = 1_536 SCREAMING_SNAKE_CASE : List[str] = 6 elif "l16" in checkpoint_url: SCREAMING_SNAKE_CASE : Optional[int] = 1_024 SCREAMING_SNAKE_CASE : Optional[int] = 4_096 SCREAMING_SNAKE_CASE : Tuple = 24 SCREAMING_SNAKE_CASE : Union[str, Any] = 16 SCREAMING_SNAKE_CASE : Dict = 0.1 elif "b4" in checkpoint_url: SCREAMING_SNAKE_CASE : str = 4 elif "l7" in checkpoint_url: SCREAMING_SNAKE_CASE : Union[str, Any] = 7 SCREAMING_SNAKE_CASE : Union[str, Any] = 1_024 SCREAMING_SNAKE_CASE : List[Any] = 4_096 SCREAMING_SNAKE_CASE : List[Any] = 24 SCREAMING_SNAKE_CASE : Tuple = 16 SCREAMING_SNAKE_CASE : Union[str, Any] = 0.1 SCREAMING_SNAKE_CASE : Union[str, Any] = ViTMSNModel(a__ ) SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(a__ , map_location='''cpu''' )['''target_encoder'''] SCREAMING_SNAKE_CASE : Any = ViTImageProcessor(size=config.image_size ) remove_projection_head(a__ ) SCREAMING_SNAKE_CASE : Any = create_rename_keys(a__ , base_model=a__ ) for src, dest in rename_keys: rename_key(a__ , a__ , a__ ) read_in_q_k_v(a__ , a__ , base_model=a__ ) model.load_state_dict(a__ ) model.eval() SCREAMING_SNAKE_CASE : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg''' SCREAMING_SNAKE_CASE : Dict = Image.open(requests.get(a__ , stream=a__ ).raw ) SCREAMING_SNAKE_CASE : Optional[int] = ViTImageProcessor( size=config.image_size , image_mean=a__ , image_std=a__ ) SCREAMING_SNAKE_CASE : int = image_processor(images=a__ , return_tensors='''pt''' ) # forward pass torch.manual_seed(2 ) SCREAMING_SNAKE_CASE : Tuple = model(**a__ ) SCREAMING_SNAKE_CASE : str = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # 
https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] ) elif "b16" in checkpoint_url: SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] ) elif "l16" in checkpoint_url: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] ) elif "b4" in checkpoint_url: SCREAMING_SNAKE_CASE : str = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] ) else: SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3] , a__ , atol=1e-4 ) print(F"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(a__ ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(a__ ) if __name__ == "__main__": a__ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''', type=str, help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) a__ : Any = parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
313
0
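The read_in_q_k_v helper above slices a fused qkv projection matrix into separate query/key/value blocks by rows. A toy check of that slicing logic, with hidden_size shrunk purely for illustration:

import torch

hidden_size = 4
qkv_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(
    3 * hidden_size, hidden_size
)

query_w = qkv_weight[:hidden_size, :]
key_w = qkv_weight[hidden_size : hidden_size * 2, :]
value_w = qkv_weight[-hidden_size:, :]

# Re-concatenating the three blocks must reproduce the fused matrix exactly.
assert torch.equal(torch.cat([query_w, key_w, value_w], dim=0), qkv_weight)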
def reverse_words(input_str: str) -> str:
    """Return the sentence with its word order reversed."""
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
110
import csv

import tweepy

# Twitter API credentials (fill these in; variable names restored for readability)
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
313
0
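A quick sanity check of the word-reversal one-liner above; str.split() with no argument also collapses repeated whitespace, which is why the second case comes out normalized:

def reverse_words(input_str: str) -> str:
    return " ".join(input_str.split()[::-1])


assert reverse_words("I love Python") == "Python love I"
assert reverse_words("  leading   and trailing  ") == "trailing and leading"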
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class DownloadConfig:
    # Field names restored from the `datasets` library's DownloadConfig; the dump
    # had lost them, keeping only the default values, so treat this mapping as a
    # best-effort reconstruction.
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
299
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
313
0
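The copy() method above deep-copies every attribute, so mutating the clone cannot leak back into the original config. A minimal sketch of that idiom with a stand-in dataclass (TinyConfig is hypothetical, not from the source):

import copy
from dataclasses import dataclass, field


@dataclass
class TinyConfig:
    proxies: dict = field(default_factory=dict)

    def copy(self) -> "TinyConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})


original = TinyConfig(proxies={"https": "http://proxy:8080"})
clone = original.copy()
clone.proxies["https"] = "changed"
assert original.proxies["https"] == "http://proxy:8080"  # deep copy left the original intact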
'''simple docstring''' import enum import warnings from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING __a: Any = logging.get_logger(__name__) class UpperCAmelCase ( enum.Enum ): '''simple docstring''' SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = 1 @add_end_docstrings(a__ ) class UpperCAmelCase ( a__ ): '''simple docstring''' SCREAMING_SNAKE_CASE = 'generated' def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ) -> str: super().__init__(*_lowerCamelCase , **_lowerCamelCase ) self.check_model_type( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING ) def _lowerCAmelCase( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase , ) -> Dict: lowercase__ : Any = {} if truncation is not None: lowercase__ : int = truncation lowercase__ : Any = generate_kwargs lowercase__ : Tuple = {} if return_tensors is not None and return_type is None: lowercase__ : Optional[int] = ReturnType.TENSORS if return_tensors else ReturnType.TEXT if return_type is not None: lowercase__ : Dict = return_type if clean_up_tokenization_spaces is not None: lowercase__ : List[Any] = clean_up_tokenization_spaces if stop_sequence is not None: lowercase__ : Any = self.tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) if len(_lowerCamelCase ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) lowercase__ : str = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str: return True def _lowerCAmelCase( self , *__lowerCAmelCase , __lowerCAmelCase ) -> str: lowercase__ : Any = self.model.config.prefix if self.model.config.prefix is not None else '''''' if isinstance(args[0] , _lowerCamelCase ): if self.tokenizer.pad_token_id is None: raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' ) lowercase__ : List[str] = ([prefix + arg for arg in args[0]],) lowercase__ : Tuple = True elif isinstance(args[0] , _lowerCamelCase ): lowercase__ : Any = (prefix + args[0],) lowercase__ : Dict = False else: raise ValueError( F""" `args[0]`: {args[0]} have the wrong format. 
The should be either of type `str` or type `list`""" ) lowercase__ : int = self.tokenizer(*_lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase , return_tensors=self.framework ) # This is produced by tokenizers but is an invalid generate kwargs if "token_type_ids" in inputs: del inputs["token_type_ids"] return inputs def __call__( self , *__lowerCAmelCase , **__lowerCAmelCase ) -> str: lowercase__ : int = super().__call__(*_lowerCamelCase , **_lowerCamelCase ) if ( isinstance(args[0] , _lowerCamelCase ) and all(isinstance(_lowerCamelCase , _lowerCamelCase ) for el in args[0] ) and all(len(_lowerCamelCase ) == 1 for res in result ) ): return [res[0] for res in result] return result def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase=TruncationStrategy.DO_NOT_TRUNCATE , **__lowerCAmelCase ) -> Tuple: lowercase__ : List[Any] = self._parse_and_tokenize(_lowerCamelCase , truncation=_lowerCamelCase , **_lowerCamelCase ) return inputs def _lowerCAmelCase( self , __lowerCAmelCase , **__lowerCAmelCase ) -> Optional[Any]: if self.framework == "pt": lowercase__ : Union[str, Any] = model_inputs['''input_ids'''].shape elif self.framework == "tf": lowercase__ : Optional[Any] = tf.shape(model_inputs['''input_ids'''] ).numpy() lowercase__ : List[Any] = generate_kwargs.get('''min_length''' , self.model.config.min_length ) lowercase__ : Union[str, Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) self.check_inputs(_lowerCamelCase , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] ) lowercase__ : Optional[int] = self.model.generate(**_lowerCamelCase , **_lowerCamelCase ) lowercase__ : List[Any] = output_ids.shape[0] if self.framework == "pt": lowercase__ : Union[str, Any] = output_ids.reshape(_lowerCamelCase , out_b // in_b , *output_ids.shape[1:] ) elif self.framework == "tf": lowercase__ : List[str] = tf.reshape(_lowerCamelCase , (in_b, out_b // in_b, *output_ids.shape[1:]) ) return {"output_ids": output_ids} def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase=ReturnType.TEXT , __lowerCAmelCase=False ) -> Union[str, Any]: lowercase__ : Union[str, Any] = [] for output_ids in model_outputs["output_ids"][0]: if return_type == ReturnType.TENSORS: lowercase__ : Dict = {F"""{self.return_name}_token_ids""": output_ids} elif return_type == ReturnType.TEXT: lowercase__ : Tuple = { F"""{self.return_name}_text""": self.tokenizer.decode( _lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase , ) } records.append(_lowerCamelCase ) return records @add_end_docstrings(a__ ) class UpperCAmelCase ( a__ ): '''simple docstring''' SCREAMING_SNAKE_CASE = 'summary' def __call__( self , *__lowerCAmelCase , **__lowerCAmelCase ) -> List[str]: return super().__call__(*_lowerCamelCase , **_lowerCamelCase ) def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> bool: if max_length < min_length: logger.warning(F"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" ) if input_length < max_length: logger.warning( F"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """ '''a summarization task, where outputs shorter than the input are typically wanted, you might ''' F"""consider decreasing max_length manually, e.g. 
summarizer('...', max_length={input_length//2})""" ) @add_end_docstrings(a__ ) class UpperCAmelCase ( a__ ): '''simple docstring''' SCREAMING_SNAKE_CASE = 'translation' def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any: if input_length > 0.9 * max_length: logger.warning( F"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """ '''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' ) return True def _lowerCAmelCase( self , *__lowerCAmelCase , __lowerCAmelCase=TruncationStrategy.DO_NOT_TRUNCATE , __lowerCAmelCase=None , __lowerCAmelCase=None ) -> Optional[int]: if getattr(self.tokenizer , '''_build_translation_inputs''' , _lowerCamelCase ): return self.tokenizer._build_translation_inputs( *_lowerCamelCase , return_tensors=self.framework , truncation=_lowerCamelCase , src_lang=_lowerCamelCase , tgt_lang=_lowerCamelCase ) else: return super()._parse_and_tokenize(*_lowerCamelCase , truncation=_lowerCamelCase ) def _lowerCAmelCase( self , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ) -> int: lowercase__ : Optional[int] = super()._sanitize_parameters(**_lowerCamelCase ) if src_lang is not None: lowercase__ : Optional[Any] = src_lang if tgt_lang is not None: lowercase__ : List[str] = tgt_lang if src_lang is None and tgt_lang is None: # Backward compatibility, direct arguments use is preferred. lowercase__ : Optional[int] = kwargs.get('''task''' , self.task ) lowercase__ : int = task.split('''_''' ) if task and len(_lowerCamelCase ) == 4: # translation, XX, to YY lowercase__ : Any = items[1] lowercase__ : Tuple = items[3] return preprocess_params, forward_params, postprocess_params def __call__( self , *__lowerCAmelCase , **__lowerCAmelCase ) -> str: return super().__call__(*_lowerCamelCase , **_lowerCamelCase )
198
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType a__ : Any = logging.get_logger(__name__) a__ : Dict = { '''openai/imagegpt-small''': '''''', '''openai/imagegpt-medium''': '''''', '''openai/imagegpt-large''': '''''', } class a_ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = 'imagegpt' __SCREAMING_SNAKE_CASE : Optional[Any] = ['past_key_values'] __SCREAMING_SNAKE_CASE : Union[str, Any] = { 'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , _lowerCamelCase=512 + 1 , _lowerCamelCase=32 * 32 , _lowerCamelCase=512 , _lowerCamelCase=24 , _lowerCamelCase=8 , _lowerCamelCase=None , _lowerCamelCase="quick_gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=1e-5 , _lowerCamelCase=0.0_2 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=False , **_lowerCamelCase , ) ->str: SCREAMING_SNAKE_CASE : Any = vocab_size SCREAMING_SNAKE_CASE : Optional[int] = n_positions SCREAMING_SNAKE_CASE : Optional[int] = n_embd SCREAMING_SNAKE_CASE : List[Any] = n_layer SCREAMING_SNAKE_CASE : List[Any] = n_head SCREAMING_SNAKE_CASE : int = n_inner SCREAMING_SNAKE_CASE : Dict = activation_function SCREAMING_SNAKE_CASE : Union[str, Any] = resid_pdrop SCREAMING_SNAKE_CASE : Dict = embd_pdrop SCREAMING_SNAKE_CASE : List[str] = attn_pdrop SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_epsilon SCREAMING_SNAKE_CASE : Tuple = initializer_range SCREAMING_SNAKE_CASE : int = scale_attn_weights SCREAMING_SNAKE_CASE : Optional[int] = use_cache SCREAMING_SNAKE_CASE : Optional[Any] = scale_attn_by_inverse_layer_idx SCREAMING_SNAKE_CASE : str = reorder_and_upcast_attn SCREAMING_SNAKE_CASE : List[str] = tie_word_embeddings super().__init__(tie_word_embeddings=_lowerCamelCase , **_lowerCamelCase ) class a_ ( a__ ): """simple docstring""" @property def __lowerCAmelCase ( self ) ->Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''sequence'''}), ] ) def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = 3 , _lowerCamelCase = 32 , _lowerCamelCase = 32 , ) ->Mapping[str, Any]: SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_images(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Tuple = dict(preprocessor(images=_lowerCamelCase , return_tensors=_lowerCamelCase ) ) return inputs
313
0
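The _forward step of the pipeline above regroups generate() output so each prompt in the batch keeps its own candidate sequences. A shape-only sketch of that reshape, with made-up sizes:

import torch

in_b = 2        # prompts in the input batch
per_prompt = 3  # sequences generate() produced per prompt
seq_len = 5

output_ids = torch.arange(in_b * per_prompt * seq_len).reshape(in_b * per_prompt, seq_len)
out_b = output_ids.shape[0]
grouped = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
assert grouped.shape == (2, 3, 5)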
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
223
from maths.prime_check import is_prime


def twin_prime(number: int) -> int:
    """Return number + 2 if (number, number + 2) is a twin prime pair, else -1."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
313
0
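For a self-contained check of the twin-prime logic above, here is a throwaway trial-division is_prime standing in for maths.prime_check (the real module is not reproduced here):

def is_prime(number: int) -> bool:
    if number < 2:
        return False
    if number % 2 == 0:
        return number == 2
    divisor = 3
    while divisor * divisor <= number:
        if number % divisor == 0:
            return False
        divisor += 2
    return True


def twin_prime(number: int) -> int:
    return number + 2 if is_prime(number) and is_prime(number + 2) else -1


assert twin_prime(3) == 5
assert twin_prime(5) == 7
assert twin_prime(7) == -1  # 9 = 3 * 3, so (7, 9) is not a twin prime pair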
"""simple docstring""" import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class a__ ( unittest.TestCase ): def lowercase ( self : str ) -> None: lowercase : Dict = Vector([1, 2, 3] ) self.assertEqual(x.component(0 ), 1 ) self.assertEqual(x.component(2 ), 3 ) lowercase : List[Any] = Vector() def lowercase ( self : List[Any] ) -> None: lowercase : Optional[int] = Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(_lowerCamelCase ), '(0,0,0,0,0,1)' ) def lowercase ( self : int ) -> None: lowercase : Tuple = Vector([1, 2, 3, 4] ) self.assertEqual(len(_lowerCamelCase ), 4 ) def lowercase ( self : Any ) -> None: lowercase : Dict = Vector([1, 2] ) lowercase : str = Vector([1, 2, 3, 4, 5] ) lowercase : List[Any] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) lowercase : Tuple = Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length(), 2.236, 3 ) self.assertAlmostEqual(y.euclidean_length(), 7.416, 3 ) self.assertEqual(z.euclidean_length(), 0 ) self.assertAlmostEqual(w.euclidean_length(), 7.616, 3 ) def lowercase ( self : Dict ) -> None: lowercase : Dict = Vector([1, 2, 3] ) lowercase : List[str] = Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ), 2 ) self.assertEqual((x + y).component(1 ), 3 ) self.assertEqual((x + y).component(2 ), 4 ) def lowercase ( self : Tuple ) -> None: lowercase : List[str] = Vector([1, 2, 3] ) lowercase : List[Any] = Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ), 0 ) self.assertEqual((x - y).component(1 ), 1 ) self.assertEqual((x - y).component(2 ), 2 ) def lowercase ( self : Tuple ) -> None: lowercase : int = Vector([1, 2, 3] ) lowercase : Optional[int] = Vector([2, -1, 4] ) # for test of dot product lowercase : str = Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ), '(3.0,6.0,9.0)' ) self.assertEqual((a * b), 0 ) def lowercase ( self : List[str] ) -> None: self.assertEqual(str(zero_vector(10 ) ).count('0' ), 10 ) def lowercase ( self : Union[str, Any] ) -> None: self.assertEqual(str(unit_basis_vector(3, 1 ) ), '(0,1,0)' ) def lowercase ( self : Union[str, Any] ) -> None: lowercase : int = Vector([1, 2, 3] ) lowercase : Union[str, Any] = Vector([1, 0, 1] ) self.assertEqual(str(axpy(2, _lowerCamelCase, _lowerCamelCase ) ), '(3,4,7)' ) def lowercase ( self : Union[str, Any] ) -> None: lowercase : Tuple = Vector([1, 0, 0, 0, 0, 0] ) lowercase : str = x.copy() self.assertEqual(str(_lowerCamelCase ), str(_lowerCamelCase ) ) def lowercase ( self : Any ) -> None: lowercase : Dict = Vector([1, 0, 0] ) x.change_component(0, 0 ) x.change_component(1, 1 ) self.assertEqual(str(_lowerCamelCase ), '(0,1,0)' ) def lowercase ( self : int ) -> None: lowercase : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3 ) self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n', str(_lowerCamelCase ) ) def lowercase ( self : Union[str, Any] ) -> None: lowercase : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3 ) lowercase : Optional[int] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y], a.minor(_lowerCamelCase, _lowerCamelCase ) ) def lowercase ( self : Union[str, Any] ) -> None: lowercase : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3 ) lowercase : str = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(cofactors[x][y], a.cofactor(_lowerCamelCase, _lowerCamelCase ) ) def lowercase ( self : Any ) -> None: lowercase : Tuple = Matrix([[1, 2, 
3], [2, 4, 5], [6, 7, 8]], 3, 3 ) self.assertEqual(-5, a.determinant() ) def lowercase ( self : str ) -> None: lowercase : int = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3 ) lowercase : Optional[Any] = Vector([1, 2, 3] ) self.assertEqual('(14,32,50)', str(a * x ) ) self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n', str(a * 2 ) ) def lowercase ( self : Union[str, Any] ) -> None: lowercase : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3 ) a.change_component(0, 2, 5 ) self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n', str(_lowerCamelCase ) ) def lowercase ( self : Dict ) -> None: lowercase : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3 ) self.assertEqual(7, a.component(2, 1 ), 0.01 ) def lowercase ( self : int ) -> None: lowercase : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3 ) lowercase : List[Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3 ) self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n', str(a + b ) ) def lowercase ( self : List[str] ) -> None: lowercase : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3 ) lowercase : int = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3 ) self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n', str(a - b ) ) def lowercase ( self : Dict ) -> None: self.assertEqual( '|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n', str(square_zero_matrix(5 ) ), ) if __name__ == "__main__": unittest.main()
255
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaPriorEmbaEmbPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class a_ ( a__ , unittest.TestCase ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = KandinskyVaaControlnetImgaImgPipeline __SCREAMING_SNAKE_CASE : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint'] __SCREAMING_SNAKE_CASE : List[Any] = ['image_embeds', 'negative_image_embeds', 'image', 'hint'] __SCREAMING_SNAKE_CASE : List[str] = [ 'generator', 'height', 'width', 'strength', 'guidance_scale', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] __SCREAMING_SNAKE_CASE : List[Any] = False @property def __lowerCAmelCase ( self ) ->Optional[Any]: return 32 @property def __lowerCAmelCase ( self ) ->Optional[int]: return 32 @property def __lowerCAmelCase ( self ) ->str: return self.time_input_dim @property def __lowerCAmelCase ( self ) ->Dict: return self.time_input_dim * 4 @property def __lowerCAmelCase ( self ) ->Tuple: return 100 @property def __lowerCAmelCase ( self ) ->int: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Union[str, Any] = { '''in_channels''': 8, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image_hint''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } SCREAMING_SNAKE_CASE : List[str] = UNetaDConditionModel(**_lowerCamelCase ) return model @property def __lowerCAmelCase ( self ) ->Any: return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __lowerCAmelCase ( self ) ->Tuple: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[int] = VQModel(**self.dummy_movq_kwargs ) return model def __lowerCAmelCase ( self ) ->List[str]: SCREAMING_SNAKE_CASE : str = self.dummy_unet SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_movq SCREAMING_SNAKE_CASE : List[str] = { '''num_train_timesteps''': 1000, '''beta_schedule''': '''linear''', '''beta_start''': 0.0_0_0_8_5, '''beta_end''': 0.0_1_2, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', '''thresholding''': False, } 
SCREAMING_SNAKE_CASE : str = DDIMScheduler(**_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = { '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->int: SCREAMING_SNAKE_CASE : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( _lowerCamelCase ) # create init_image SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) SCREAMING_SNAKE_CASE : int = image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE : Dict = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) ) # create hint SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) if str(_lowerCamelCase ).startswith('''mps''' ): SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(_lowerCamelCase ) else: SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Dict = { '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''hint''': hint, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def __lowerCAmelCase ( self ) ->Dict: SCREAMING_SNAKE_CASE : Optional[int] = '''cpu''' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**_lowerCamelCase ) SCREAMING_SNAKE_CASE : int = pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : str = output.images SCREAMING_SNAKE_CASE : Any = pipe( **self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0] SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) SCREAMING_SNAKE_CASE : Union[str, Any] = np.array( [0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class a_ ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self ) ->Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) ->Optional[int]: SCREAMING_SNAKE_CASE : int = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' ) SCREAMING_SNAKE_CASE : int = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) SCREAMING_SNAKE_CASE : Optional[Any] = init_image.resize((512, 512) ) 
SCREAMING_SNAKE_CASE : Dict = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/hint_image_cat.png''' ) SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(np.array(_lowerCamelCase ) ).float() / 2_5_5.0 SCREAMING_SNAKE_CASE : int = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) SCREAMING_SNAKE_CASE : List[Any] = '''A robot, 4k photo''' SCREAMING_SNAKE_CASE : List[str] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Tuple = KandinskyVaaControlnetImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : Any = pipeline.to(_lowerCamelCase ) pipeline.set_progress_bar_config(disable=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_prior( _lowerCamelCase , image=_lowerCamelCase , strength=0.8_5 , generator=_lowerCamelCase , negative_prompt='''''' , ).to_tuple() SCREAMING_SNAKE_CASE : List[str] = pipeline( image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , hint=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , ) SCREAMING_SNAKE_CASE : Any = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
313
0
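The controlnet test above converts its depth hint from a PIL image into a batched, channel-first float tensor in [0, 1]. The conversion in isolation, with a placeholder image instead of the downloaded one:

import numpy as np
import torch
from PIL import Image

image = Image.new("RGB", (64, 64), color=(128, 0, 255))   # placeholder hint image
hint = torch.from_numpy(np.array(image)).float() / 255.0  # HWC, values in [0, 1]
hint = hint.permute(2, 0, 1).unsqueeze(0)                 # NCHW, batch of one
assert hint.shape == (1, 3, 64, 64)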
"""simple docstring""" from __future__ import annotations _UpperCamelCase : str = 1.6_0_2_1e-1_9 # units = C def _SCREAMING_SNAKE_CASE ( __snake_case : int , __snake_case : str , __snake_case : Dict , ): '''simple docstring''' if (conductivity, electron_conc, mobility).count(0 ) != 1: raise ValueError('You cannot supply more or less than 2 values' ) elif conductivity < 0: raise ValueError('Conductivity cannot be negative' ) elif electron_conc < 0: raise ValueError('Electron concentration cannot be negative' ) elif mobility < 0: raise ValueError('mobility cannot be negative' ) elif conductivity == 0: return ( "conductivity", mobility * electron_conc * ELECTRON_CHARGE, ) elif electron_conc == 0: return ( "electron_conc", conductivity / (mobility * ELECTRON_CHARGE), ) else: return ( "mobility", conductivity / (electron_conc * ELECTRON_CHARGE), ) if __name__ == "__main__": import doctest doctest.testmod()
220
from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker a__ : List[str] = '''CompVis/stable-diffusion-v1-1''' a__ : Optional[Any] = '''CompVis/stable-diffusion-v1-2''' a__ : Any = '''CompVis/stable-diffusion-v1-3''' a__ : Optional[Any] = '''CompVis/stable-diffusion-v1-4''' class a_ ( a__ ): """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = True , ) ->str: super()._init_() SCREAMING_SNAKE_CASE : Tuple = StableDiffusionPipeline.from_pretrained(_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionPipeline.from_pretrained(_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = StableDiffusionPipeline.from_pretrained(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Dict = StableDiffusionPipeline( vae=_lowerCamelCase , text_encoder=_lowerCamelCase , tokenizer=_lowerCamelCase , unet=_lowerCamelCase , scheduler=_lowerCamelCase , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , requires_safety_checker=_lowerCamelCase , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def __lowerCAmelCase ( self ) ->Dict[str, Any]: return {k: getattr(self , _lowerCamelCase ) for k in self.config.keys() if not k.startswith('''_''' )} def __lowerCAmelCase ( self , _lowerCamelCase = "auto" ) ->Optional[int]: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory SCREAMING_SNAKE_CASE : Optional[int] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(_lowerCamelCase ) def __lowerCAmelCase ( self ) ->List[str]: self.enable_attention_slicing(_lowerCamelCase ) @torch.no_grad() def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->str: return self.pipea( prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) @torch.no_grad() def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Tuple: return self.pipea( prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , 
num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) @torch.no_grad() def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Dict: return self.pipea( prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) @torch.no_grad() def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Optional[Any]: return self.pipea( prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) @torch.no_grad() def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Dict: SCREAMING_SNAKE_CASE : Optional[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu''' self.to(_lowerCamelCase ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" ) # Get first result from Stable Diffusion Checkpoint v1.1 SCREAMING_SNAKE_CASE : Optional[Any] = self.textaimg_sda_a( prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) # Get first result from Stable Diffusion Checkpoint v1.2 SCREAMING_SNAKE_CASE : Any = self.textaimg_sda_a( prompt=_lowerCamelCase , height=_lowerCamelCase , 
width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) # Get first result from Stable Diffusion Checkpoint v1.3 SCREAMING_SNAKE_CASE : Optional[int] = self.textaimg_sda_a( prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) # Get first result from Stable Diffusion Checkpoint v1.4 SCREAMING_SNAKE_CASE : str = self.textaimg_sda_a( prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
313
0
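The carrier-concentration solver above just inverts sigma = n * q * mu for the missing quantity. A round-trip check with hypothetical numbers (the values below are illustrative, not from the source):

ELECTRON_CHARGE = 1.6021e-19  # coulombs

electron_conc = 1e20  # carriers per cubic metre (hypothetical)
mobility = 0.01       # m^2 / (V * s)          (hypothetical)
conductivity = electron_conc * ELECTRON_CHARGE * mobility

# Inverting the relation recovers the electron concentration we started from.
recovered = conductivity / (mobility * ELECTRON_CHARGE)
assert abs(recovered - electron_conc) / electron_conc < 1e-12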
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Greedy fractional knapsack: maximize profit for a given weight limit."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight do not reach max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # mark as used so index() skips it next time

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function Call
    calc_profit(profit, weight, max_weight)
37
from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxCrossAttnUpBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, FlaxUpBlockaD, ) @flax.struct.dataclass class a_ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE : jnp.ndarray @flax_register_to_config class a_ ( nn.Module , a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = 32 __SCREAMING_SNAKE_CASE : int = 4 __SCREAMING_SNAKE_CASE : int = 4 __SCREAMING_SNAKE_CASE : Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) __SCREAMING_SNAKE_CASE : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") __SCREAMING_SNAKE_CASE : Union[bool, Tuple[bool]] = False __SCREAMING_SNAKE_CASE : Tuple[int] = (320, 640, 1280, 1280) __SCREAMING_SNAKE_CASE : int = 2 __SCREAMING_SNAKE_CASE : Union[int, Tuple[int]] = 8 __SCREAMING_SNAKE_CASE : Optional[Union[int, Tuple[int]]] = None __SCREAMING_SNAKE_CASE : int = 1280 __SCREAMING_SNAKE_CASE : float = 0.0 __SCREAMING_SNAKE_CASE : bool = False __SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa __SCREAMING_SNAKE_CASE : bool = True __SCREAMING_SNAKE_CASE : int = 0 __SCREAMING_SNAKE_CASE : bool = False def __lowerCAmelCase ( self , _lowerCamelCase ) ->FrozenDict: # init input tensors SCREAMING_SNAKE_CASE : List[Any] = (1, self.in_channels, self.sample_size, self.sample_size) SCREAMING_SNAKE_CASE : List[Any] = jnp.zeros(_lowerCamelCase , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE : Tuple = jnp.ones((1,) , dtype=jnp.intaa ) SCREAMING_SNAKE_CASE : List[Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = jax.random.split(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Tuple = {'''params''': params_rng, '''dropout''': dropout_rng} return self.init(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )["params"] def __lowerCAmelCase ( self ) ->int: SCREAMING_SNAKE_CASE : List[str] = self.block_out_channels SCREAMING_SNAKE_CASE : Optional[int] = block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( '''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. 
SCREAMING_SNAKE_CASE : List[str] = self.num_attention_heads or self.attention_head_dim # input SCREAMING_SNAKE_CASE : Optional[int] = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time SCREAMING_SNAKE_CASE : Tuple = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) SCREAMING_SNAKE_CASE : Dict = FlaxTimestepEmbedding(_lowerCamelCase , dtype=self.dtype ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.only_cross_attention if isinstance(_lowerCamelCase , _lowerCamelCase ): SCREAMING_SNAKE_CASE : str = (only_cross_attention,) * len(self.down_block_types ) if isinstance(_lowerCamelCase , _lowerCamelCase ): SCREAMING_SNAKE_CASE : str = (num_attention_heads,) * len(self.down_block_types ) # down SCREAMING_SNAKE_CASE : Optional[int] = [] SCREAMING_SNAKE_CASE : Optional[Any] = block_out_channels[0] for i, down_block_type in enumerate(self.down_block_types ): SCREAMING_SNAKE_CASE : str = output_channel SCREAMING_SNAKE_CASE : int = block_out_channels[i] SCREAMING_SNAKE_CASE : List[Any] = i == len(_lowerCamelCase ) - 1 if down_block_type == "CrossAttnDownBlock2D": SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxCrossAttnDownBlockaD( in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxDownBlockaD( in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = down_blocks # mid SCREAMING_SNAKE_CASE : int = FlaxUNetMidBlockaDCrossAttn( in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) # up SCREAMING_SNAKE_CASE : str = [] SCREAMING_SNAKE_CASE : str = list(reversed(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : int = list(reversed(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : int = list(reversed(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : List[str] = reversed_block_out_channels[0] for i, up_block_type in enumerate(self.up_block_types ): SCREAMING_SNAKE_CASE : Dict = output_channel SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i] SCREAMING_SNAKE_CASE : Tuple = reversed_block_out_channels[min(i + 1 , len(_lowerCamelCase ) - 1 )] SCREAMING_SNAKE_CASE : Dict = i == len(_lowerCamelCase ) - 1 if up_block_type == "CrossAttnUpBlock2D": SCREAMING_SNAKE_CASE : str = FlaxCrossAttnUpBlockaD( in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: SCREAMING_SNAKE_CASE : Optional[int] = FlaxUpBlockaD( in_channels=_lowerCamelCase , 
out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , ) up_blocks.append(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Dict = output_channel SCREAMING_SNAKE_CASE : Tuple = up_blocks # out SCREAMING_SNAKE_CASE : Any = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) SCREAMING_SNAKE_CASE : Any = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase = True , _lowerCamelCase = False , ) ->Union[FlaxUNetaDConditionOutput, Tuple]: # 1. time if not isinstance(_lowerCamelCase , jnp.ndarray ): SCREAMING_SNAKE_CASE : int = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(_lowerCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0: SCREAMING_SNAKE_CASE : List[str] = timesteps.astype(dtype=jnp.floataa ) SCREAMING_SNAKE_CASE : Optional[Any] = jnp.expand_dims(_lowerCamelCase , 0 ) SCREAMING_SNAKE_CASE : List[str] = self.time_proj(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = self.time_embedding(_lowerCamelCase ) # 2. pre-process SCREAMING_SNAKE_CASE : int = jnp.transpose(_lowerCamelCase , (0, 2, 3, 1) ) SCREAMING_SNAKE_CASE : List[Any] = self.conv_in(_lowerCamelCase ) # 3. down SCREAMING_SNAKE_CASE : Optional[int] = (sample,) for down_block in self.down_blocks: if isinstance(_lowerCamelCase , _lowerCamelCase ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = down_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train ) else: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = down_block(_lowerCamelCase , _lowerCamelCase , deterministic=not train ) down_block_res_samples += res_samples if down_block_additional_residuals is not None: SCREAMING_SNAKE_CASE : int = () for down_block_res_sample, down_block_additional_residual in zip( _lowerCamelCase , _lowerCamelCase ): down_block_res_sample += down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) SCREAMING_SNAKE_CASE : Dict = new_down_block_res_samples # 4. mid SCREAMING_SNAKE_CASE : Optional[Any] = self.mid_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train ) if mid_block_additional_residual is not None: sample += mid_block_additional_residual # 5. up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE : Optional[Any] = down_block_res_samples[-(self.layers_per_block + 1) :] SCREAMING_SNAKE_CASE : Optional[int] = down_block_res_samples[: -(self.layers_per_block + 1)] if isinstance(_lowerCamelCase , _lowerCamelCase ): SCREAMING_SNAKE_CASE : str = up_block( _lowerCamelCase , temb=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train , ) else: SCREAMING_SNAKE_CASE : Optional[int] = up_block(_lowerCamelCase , temb=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train ) # 6. post-process SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = nn.silu(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Dict = self.conv_out(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = jnp.transpose(_lowerCamelCase , (0, 3, 1, 2) ) if not return_dict: return (sample,) return FlaxUNetaDConditionOutput(sample=_lowerCamelCase )
313
0
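The Flax UNet row above transposes samples from PyTorch-style NCHW to Flax's native NHWC layout before the first convolution and back after the last one. A minimal sketch of just that convention, with illustrative shapes rather than the model's real ones:

import jax
import jax.numpy as jnp
from flax import linen as nn

class TinyConvBlock(nn.Module):
    features: int = 8

    @nn.compact
    def __call__(self, x):  # Flax convolutions expect NHWC input
        return nn.Conv(self.features, kernel_size=(3, 3), padding="SAME")(x)

sample_nchw = jnp.zeros((1, 4, 32, 32))                 # PyTorch-style (batch, channels, h, w)
sample_nhwc = jnp.transpose(sample_nchw, (0, 2, 3, 1))  # -> (batch, h, w, channels)

model = TinyConvBlock()
params = model.init(jax.random.PRNGKey(0), sample_nhwc)
out_nhwc = model.apply(params, sample_nhwc)
out_nchw = jnp.transpose(out_nhwc, (0, 3, 1, 2))        # back to (batch, channels, h, w)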
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
212
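The config row above follows the standard PretrainedConfig contract: constructor keywords become attributes and the object round-trips through config.json. A small sketch of that round trip, using BertConfig as a stand-in class that ships with transformers:

from transformers import BertConfig

config = BertConfig(hidden_size=256, num_hidden_layers=4)
config.save_pretrained("./tiny-config")                 # writes ./tiny-config/config.json
reloaded = BertConfig.from_pretrained("./tiny-config")  # reads it back
assert reloaded.hidden_size == 256
assert reloaded.num_hidden_layers == 4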
import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class a_ ( a__ , a__ , a__ , unittest.TestCase ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = StableUnCLIPImgaImgPipeline __SCREAMING_SNAKE_CASE : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS __SCREAMING_SNAKE_CASE : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __SCREAMING_SNAKE_CASE : Any = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess __SCREAMING_SNAKE_CASE : Tuple = frozenset([] ) def __lowerCAmelCase ( self ) ->Union[str, Any]: SCREAMING_SNAKE_CASE : Optional[Any] = 32 SCREAMING_SNAKE_CASE : Tuple = embedder_hidden_size # image encoding components SCREAMING_SNAKE_CASE : int = CLIPImageProcessor(crop_size=32 , size=32 ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[int] = CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=_lowerCamelCase , projection_dim=_lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) ) # regular denoising components torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : int = StableUnCLIPImageNormalizer(embedding_dim=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Tuple = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=_lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowerCamelCase , layers_per_block=1 , upcast_attention=_lowerCamelCase , use_linear_projection=_lowerCamelCase , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[int] = DDIMScheduler( beta_schedule='''scaled_linear''' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type='''v_prediction''' , set_alpha_to_one=_lowerCamelCase , steps_offset=1 , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Dict = AutoencoderKL() SCREAMING_SNAKE_CASE : 
Optional[Any] = { # image encoding components '''feature_extractor''': feature_extractor, '''image_encoder''': image_encoder.eval(), # image noising components '''image_normalizer''': image_normalizer.eval(), '''image_noising_scheduler''': image_noising_scheduler, # regular denoising components '''tokenizer''': tokenizer, '''text_encoder''': text_encoder.eval(), '''unet''': unet.eval(), '''scheduler''': scheduler, '''vae''': vae.eval(), } return components def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 , _lowerCamelCase=True ) ->Optional[int]: if str(_lowerCamelCase ).startswith('''mps''' ): SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(_lowerCamelCase ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) if pil_image: SCREAMING_SNAKE_CASE : Any = input_image * 0.5 + 0.5 SCREAMING_SNAKE_CASE : int = input_image.clamp(0 , 1 ) SCREAMING_SNAKE_CASE : Union[str, Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() SCREAMING_SNAKE_CASE : List[str] = DiffusionPipeline.numpy_to_pil(_lowerCamelCase )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def __lowerCAmelCase ( self ) ->Tuple: SCREAMING_SNAKE_CASE : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : Tuple = StableUnCLIPImgaImgPipeline(**_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = sd_pipe.to(_lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_inputs(_lowerCamelCase ) inputs.update({'''image_embeds''': None} ) SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe(**_lowerCamelCase ).images SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE : Tuple = np.array([0.3_8_7_2, 0.7_2_2_4, 0.5_6_0_1, 0.4_7_4_1, 0.6_8_7_2, 0.5_8_1_4, 0.4_6_3_6, 0.3_8_6_7, 0.5_0_7_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowerCAmelCase ( self ) ->Tuple: SCREAMING_SNAKE_CASE : str = torch_device in ['''cpu''', '''mps'''] self._test_attention_slicing_forward_pass(test_max_difference=_lowerCamelCase ) def __lowerCAmelCase ( self ) ->List[Any]: SCREAMING_SNAKE_CASE : Tuple = torch_device in ['''cpu''', '''mps'''] self._test_inference_batch_single_identical(test_max_difference=_lowerCamelCase ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __lowerCAmelCase ( self ) ->Optional[int]: self._test_xformers_attention_forwardGenerator_pass(test_max_difference=_lowerCamelCase ) @slow @require_torch_gpu class a_ ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self ) ->int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) ->Tuple: SCREAMING_SNAKE_CASE : List[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' ) SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy( 
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' ) SCREAMING_SNAKE_CASE : List[Any] = StableUnCLIPImgaImgPipeline.from_pretrained( '''fusing/stable-unclip-2-1-l-img2img''' , torch_dtype=torch.floataa ) pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 ) SCREAMING_SNAKE_CASE : Tuple = pipe(_lowerCamelCase , '''anime turle''' , generator=_lowerCamelCase , output_type='''np''' ) SCREAMING_SNAKE_CASE : List[str] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase ) def __lowerCAmelCase ( self ) ->Optional[Any]: SCREAMING_SNAKE_CASE : List[str] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' ) SCREAMING_SNAKE_CASE : Optional[int] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' ) SCREAMING_SNAKE_CASE : List[str] = StableUnCLIPImgaImgPipeline.from_pretrained( '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa ) pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 ) SCREAMING_SNAKE_CASE : Tuple = pipe(_lowerCamelCase , '''anime turle''' , generator=_lowerCamelCase , output_type='''np''' ) SCREAMING_SNAKE_CASE : List[str] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase ) def __lowerCAmelCase ( self ) ->Any: SCREAMING_SNAKE_CASE : Union[str, Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() SCREAMING_SNAKE_CASE : str = StableUnCLIPImgaImgPipeline.from_pretrained( '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : Dict = pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() SCREAMING_SNAKE_CASE : Dict = pipe( _lowerCamelCase , '''anime turtle''' , num_inference_steps=2 , output_type='''np''' , ) SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
313
0
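The pipeline test row above seeds a torch.Generator per device, falling back to the global torch.manual_seed on MPS, where device-bound generators were historically unsupported. The helper pattern in isolation (the device string is an illustrative parameter):

import torch

def make_generator(device: str, seed: int = 0):
    # MPS rejected device-bound generators in older torch releases, so seed globally there.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

gen = make_generator("cpu", seed=0)
noise = torch.randn(1, 3, 32, 32, generator=gen)  # reproducible across runs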
'''simple docstring''' import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) lowerCamelCase = logging.get_logger(__name__) lowerCamelCase = OrderedDict( [ ("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""), ("""beit""", """BeitFeatureExtractor"""), ("""chinese_clip""", """ChineseCLIPFeatureExtractor"""), ("""clap""", """ClapFeatureExtractor"""), ("""clip""", """CLIPFeatureExtractor"""), ("""clipseg""", """ViTFeatureExtractor"""), ("""conditional_detr""", """ConditionalDetrFeatureExtractor"""), ("""convnext""", """ConvNextFeatureExtractor"""), ("""cvt""", """ConvNextFeatureExtractor"""), ("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""), ("""data2vec-vision""", """BeitFeatureExtractor"""), ("""deformable_detr""", """DeformableDetrFeatureExtractor"""), ("""deit""", """DeiTFeatureExtractor"""), ("""detr""", """DetrFeatureExtractor"""), ("""dinat""", """ViTFeatureExtractor"""), ("""donut-swin""", """DonutFeatureExtractor"""), ("""dpt""", """DPTFeatureExtractor"""), ("""encodec""", """EncodecFeatureExtractor"""), ("""flava""", """FlavaFeatureExtractor"""), ("""glpn""", """GLPNFeatureExtractor"""), ("""groupvit""", """CLIPFeatureExtractor"""), ("""hubert""", """Wav2Vec2FeatureExtractor"""), ("""imagegpt""", """ImageGPTFeatureExtractor"""), ("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""), ("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""), ("""levit""", """LevitFeatureExtractor"""), ("""maskformer""", """MaskFormerFeatureExtractor"""), ("""mctct""", """MCTCTFeatureExtractor"""), ("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""), ("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""), ("""mobilevit""", """MobileViTFeatureExtractor"""), ("""nat""", """ViTFeatureExtractor"""), ("""owlvit""", """OwlViTFeatureExtractor"""), ("""perceiver""", """PerceiverFeatureExtractor"""), ("""poolformer""", """PoolFormerFeatureExtractor"""), ("""regnet""", """ConvNextFeatureExtractor"""), ("""resnet""", """ConvNextFeatureExtractor"""), ("""segformer""", """SegformerFeatureExtractor"""), ("""sew""", """Wav2Vec2FeatureExtractor"""), ("""sew-d""", """Wav2Vec2FeatureExtractor"""), ("""speech_to_text""", """Speech2TextFeatureExtractor"""), ("""speecht5""", """SpeechT5FeatureExtractor"""), ("""swiftformer""", """ViTFeatureExtractor"""), ("""swin""", """ViTFeatureExtractor"""), ("""swinv2""", """ViTFeatureExtractor"""), ("""table-transformer""", """DetrFeatureExtractor"""), ("""timesformer""", """VideoMAEFeatureExtractor"""), ("""tvlt""", """TvltFeatureExtractor"""), ("""unispeech""", """Wav2Vec2FeatureExtractor"""), ("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""), ("""van""", """ConvNextFeatureExtractor"""), ("""videomae""", """VideoMAEFeatureExtractor"""), ("""vilt""", """ViltFeatureExtractor"""), ("""vit""", """ViTFeatureExtractor"""), ("""vit_mae""", """ViTFeatureExtractor"""), ("""vit_msn""", """ViTFeatureExtractor"""), ("""wav2vec2""", """Wav2Vec2FeatureExtractor"""), ("""wav2vec2-conformer""", 
"""Wav2Vec2FeatureExtractor"""), ("""wavlm""", """Wav2Vec2FeatureExtractor"""), ("""whisper""", """WhisperFeatureExtractor"""), ("""xclip""", """CLIPFeatureExtractor"""), ("""yolos""", """YolosFeatureExtractor"""), ] ) lowerCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def _A ( _lowerCAmelCase ): """simple docstring""" for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: __lowercase =model_type_to_module_name(a__ ) __lowercase =importlib.import_module(f""".{module_name}""" , 'transformers.models' ) try: return getattr(a__ , a__ ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(a__ , '__name__' , a__ ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. __lowercase =importlib.import_module('transformers' ) if hasattr(a__ , a__ ): return getattr(a__ , a__ ) return None def _A ( _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , **_lowerCAmelCase , ): """simple docstring""" __lowercase =get_file_from_repo( a__ , a__ , cache_dir=a__ , force_download=a__ , resume_download=a__ , proxies=a__ , use_auth_token=a__ , revision=a__ , local_files_only=a__ , ) if resolved_config_file is None: logger.info( 'Could not locate the feature extractor configuration file, will try to use the model config instead.' ) return {} with open(a__ , encoding='utf-8' ) as reader: return json.load(a__ ) class _UpperCamelCase : '''simple docstring''' def __init__( self : Union[str, Any]): '''simple docstring''' raise EnvironmentError( 'AutoFeatureExtractor is designed to be instantiated ' 'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.') @classmethod @replace_list_option_in_docstrings(_lowerCamelCase) def __lowerCamelCase ( cls : Any , _lowerCAmelCase : List[Any] , **_lowerCAmelCase : str): '''simple docstring''' __lowercase =kwargs.pop('config' , _lowerCamelCase) __lowercase =kwargs.pop('trust_remote_code' , _lowerCamelCase) __lowercase =True __lowercase =FeatureExtractionMixin.get_feature_extractor_dict(_lowerCamelCase , **_lowerCamelCase) __lowercase =config_dict.get('feature_extractor_type' , _lowerCamelCase) __lowercase =None if "AutoFeatureExtractor" in config_dict.get('auto_map' , {}): __lowercase =config_dict['''auto_map''']['''AutoFeatureExtractor'''] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. 
if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(_lowerCamelCase , _lowerCamelCase): __lowercase =AutoConfig.from_pretrained(_lowerCamelCase , **_lowerCamelCase) # It could be in `config.feature_extractor_type`` __lowercase =getattr(_lowerCamelCase , 'feature_extractor_type' , _lowerCamelCase) if hasattr(_lowerCamelCase , 'auto_map') and "AutoFeatureExtractor" in config.auto_map: __lowercase =config.auto_map['''AutoFeatureExtractor'''] if feature_extractor_class is not None: __lowercase =feature_extractor_class_from_name(_lowerCamelCase) __lowercase =feature_extractor_auto_map is not None __lowercase =feature_extractor_class is not None or type(_lowerCamelCase) in FEATURE_EXTRACTOR_MAPPING __lowercase =resolve_trust_remote_code( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) if has_remote_code and trust_remote_code: __lowercase =get_class_from_dynamic_module( _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase) __lowercase =kwargs.pop('code_revision' , _lowerCamelCase) if os.path.isdir(_lowerCamelCase): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(_lowerCamelCase , **_lowerCamelCase) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(_lowerCamelCase , **_lowerCamelCase) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. elif type(_lowerCamelCase) in FEATURE_EXTRACTOR_MAPPING: __lowercase =FEATURE_EXTRACTOR_MAPPING[type(_lowerCamelCase)] return feature_extractor_class.from_dict(_lowerCamelCase , **_lowerCamelCase) raise ValueError( f"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """ f"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """ f"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}""") @staticmethod def __lowerCamelCase ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any]): '''simple docstring''' FEATURE_EXTRACTOR_MAPPING.register(_lowerCamelCase , _lowerCamelCase)
166
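The auto-feature-extractor row above resolves classes from string names by importing the owning module with importlib and reading the attribute off it. The same lookup in miniature, with a stdlib module standing in for a transformers submodule:

import importlib

def class_from_name(module_name: str, class_name: str):
    """Import `module_name` and return its attribute `class_name`, or None if absent."""
    module = importlib.import_module(module_name)
    return getattr(module, class_name, None)

ordered_dict_cls = class_from_name("collections", "OrderedDict")
assert ordered_dict_cls is not None
assert class_from_name("collections", "NoSuchClass") is None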
from abc import ABC, abstractmethod from typing import List, Optional class a_ ( a__ ): """simple docstring""" def __init__( self ) ->List[str]: # test for the above condition self.test() def __lowerCAmelCase ( self ) ->List[str]: SCREAMING_SNAKE_CASE : int = 0 SCREAMING_SNAKE_CASE : Union[str, Any] = False while not completed: if counter == 1: self.reset() SCREAMING_SNAKE_CASE : List[Any] = self.advance() if not self.does_advance(_lowerCamelCase ): raise Exception( '''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.update(_lowerCamelCase ) counter += 1 if counter > 1_0000: raise Exception('''update() does not fulfill the constraint.''' ) if self.remaining() != 0: raise Exception('''Custom Constraint is not defined correctly.''' ) @abstractmethod def __lowerCAmelCase ( self ) ->Optional[int]: raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) @abstractmethod def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]: raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) @abstractmethod def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]: raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) @abstractmethod def __lowerCAmelCase ( self ) ->Optional[Any]: raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) @abstractmethod def __lowerCAmelCase ( self ) ->Union[str, Any]: raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) @abstractmethod def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->Any: raise NotImplementedError( F"""{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.""" ) class a_ ( a__ ): """simple docstring""" def __init__( self , _lowerCamelCase ) ->int: super(_lowerCamelCase , self ).__init__() if not isinstance(_lowerCamelCase , _lowerCamelCase ) or len(_lowerCamelCase ) == 0: raise ValueError(F"""`token_ids` has to be a non-empty list, but is {token_ids}.""" ) if any((not isinstance(_lowerCamelCase , _lowerCamelCase ) or token_id < 0) for token_id in token_ids ): raise ValueError(F"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" ) SCREAMING_SNAKE_CASE : Optional[Any] = token_ids SCREAMING_SNAKE_CASE : Union[str, Any] = len(self.token_ids ) SCREAMING_SNAKE_CASE : Any = -1 # the index of the currently fulfilled step SCREAMING_SNAKE_CASE : Any = False def __lowerCAmelCase ( self ) ->List[Any]: if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]: if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCamelCase )}""" ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[str]: if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCamelCase )}""" ) SCREAMING_SNAKE_CASE : str = False SCREAMING_SNAKE_CASE : Any = False SCREAMING_SNAKE_CASE : List[Any] = False if self.does_advance(_lowerCamelCase ): self.fulfilled_idx += 1 SCREAMING_SNAKE_CASE : str = True if self.fulfilled_idx == (self.seqlen - 1): SCREAMING_SNAKE_CASE : Any = True SCREAMING_SNAKE_CASE : Union[str, Any] = completed else: # failed to make progress. 
SCREAMING_SNAKE_CASE : Dict = True self.reset() return stepped, completed, reset def __lowerCAmelCase ( self ) ->List[Any]: SCREAMING_SNAKE_CASE : List[Any] = False SCREAMING_SNAKE_CASE : Union[str, Any] = 0 def __lowerCAmelCase ( self ) ->Any: return self.seqlen - (self.fulfilled_idx + 1) def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->Dict: SCREAMING_SNAKE_CASE : Any = PhrasalConstraint(self.token_ids ) if stateful: SCREAMING_SNAKE_CASE : Dict = self.seqlen SCREAMING_SNAKE_CASE : int = self.fulfilled_idx SCREAMING_SNAKE_CASE : Tuple = self.completed return new_constraint class a_ : """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase=True ) ->Dict: SCREAMING_SNAKE_CASE : Any = max([len(_lowerCamelCase ) for one in nested_token_ids] ) SCREAMING_SNAKE_CASE : List[str] = {} for token_ids in nested_token_ids: SCREAMING_SNAKE_CASE : Optional[Any] = root for tidx, token_id in enumerate(_lowerCamelCase ): if token_id not in level: SCREAMING_SNAKE_CASE : Any = {} SCREAMING_SNAKE_CASE : Tuple = level[token_id] if no_subsets and self.has_subsets(_lowerCamelCase , _lowerCamelCase ): raise ValueError( '''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is''' F""" {nested_token_ids}.""" ) SCREAMING_SNAKE_CASE : List[Any] = root def __lowerCAmelCase ( self , _lowerCamelCase ) ->int: SCREAMING_SNAKE_CASE : List[Any] = self.trie for current_token in current_seq: SCREAMING_SNAKE_CASE : int = start[current_token] SCREAMING_SNAKE_CASE : Optional[int] = list(start.keys() ) return next_tokens def __lowerCAmelCase ( self , _lowerCamelCase ) ->Dict: SCREAMING_SNAKE_CASE : Any = self.next_tokens(_lowerCamelCase ) return len(_lowerCamelCase ) == 0 def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]: SCREAMING_SNAKE_CASE : Any = list(root.values() ) if len(_lowerCamelCase ) == 0: return 1 else: return sum([self.count_leaves(_lowerCamelCase ) for nn in next_nodes] ) def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict: SCREAMING_SNAKE_CASE : List[str] = self.count_leaves(_lowerCamelCase ) return len(_lowerCamelCase ) != leaf_count class a_ ( a__ ): """simple docstring""" def __init__( self , _lowerCamelCase ) ->str: super(_lowerCamelCase , self ).__init__() if not isinstance(_lowerCamelCase , _lowerCamelCase ) or len(_lowerCamelCase ) == 0: raise ValueError(F"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" ) if any(not isinstance(_lowerCamelCase , _lowerCamelCase ) for token_ids in nested_token_ids ): raise ValueError(F"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" ) if any( any((not isinstance(_lowerCamelCase , _lowerCamelCase ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( F"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" ) SCREAMING_SNAKE_CASE : List[Any] = DisjunctiveTrie(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Any = nested_token_ids SCREAMING_SNAKE_CASE : Optional[int] = self.trie.max_height SCREAMING_SNAKE_CASE : Union[str, Any] = [] SCREAMING_SNAKE_CASE : Optional[int] = False def __lowerCAmelCase ( self ) ->int: SCREAMING_SNAKE_CASE : str = self.trie.next_tokens(self.current_seq ) if len(_lowerCamelCase ) == 0: return None else: return token_list def __lowerCAmelCase ( self , _lowerCamelCase ) ->Dict: if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type 
{type(_lowerCamelCase )}""" ) SCREAMING_SNAKE_CASE : List[str] = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def __lowerCAmelCase ( self , _lowerCamelCase ) ->Any: if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_lowerCamelCase )}""" ) SCREAMING_SNAKE_CASE : int = False SCREAMING_SNAKE_CASE : List[Any] = False SCREAMING_SNAKE_CASE : Union[str, Any] = False if self.does_advance(_lowerCamelCase ): self.current_seq.append(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = True else: SCREAMING_SNAKE_CASE : Dict = True self.reset() SCREAMING_SNAKE_CASE : Any = self.trie.reached_leaf(self.current_seq ) SCREAMING_SNAKE_CASE : List[Any] = completed return stepped, completed, reset def __lowerCAmelCase ( self ) ->Optional[Any]: SCREAMING_SNAKE_CASE : Any = False SCREAMING_SNAKE_CASE : List[Any] = [] def __lowerCAmelCase ( self ) ->Optional[Any]: if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->List[str]: SCREAMING_SNAKE_CASE : str = DisjunctiveConstraint(self.token_ids ) if stateful: SCREAMING_SNAKE_CASE : str = self.seqlen SCREAMING_SNAKE_CASE : int = self.current_seq SCREAMING_SNAKE_CASE : Optional[int] = self.completed return new_constraint class a_ : """simple docstring""" def __init__( self , _lowerCamelCase ) ->Union[str, Any]: SCREAMING_SNAKE_CASE : List[Any] = constraints # max # of steps required to fulfill a given constraint SCREAMING_SNAKE_CASE : str = max([c.seqlen for c in constraints] ) SCREAMING_SNAKE_CASE : List[str] = len(_lowerCamelCase ) SCREAMING_SNAKE_CASE : int = False self.init_state() def __lowerCAmelCase ( self ) ->int: SCREAMING_SNAKE_CASE : Any = [] SCREAMING_SNAKE_CASE : List[Any] = None SCREAMING_SNAKE_CASE : Tuple = [constraint.copy(stateful=_lowerCamelCase ) for constraint in self.constraints] def __lowerCAmelCase ( self ) ->str: SCREAMING_SNAKE_CASE : str = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def __lowerCAmelCase ( self ) ->Optional[int]: SCREAMING_SNAKE_CASE : Tuple = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" SCREAMING_SNAKE_CASE : Optional[int] = constraint.advance() if isinstance(_lowerCamelCase , _lowerCamelCase ): token_list.append(_lowerCamelCase ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): token_list.extend(_lowerCamelCase ) else: SCREAMING_SNAKE_CASE : List[str] = self.inprogress_constraint.advance() if isinstance(_lowerCamelCase , _lowerCamelCase ): token_list.append(_lowerCamelCase ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): token_list.extend(_lowerCamelCase ) if len(_lowerCamelCase ) == 0: return None else: return token_list def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]: self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.add(_lowerCamelCase ) # the entire list of constraints are fulfilled if self.completed: break def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[Any]: if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise ValueError(F"""`token_id` should be an 
`int`, but is `{token_id}`.""" ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = False, False if self.completed: SCREAMING_SNAKE_CASE : List[str] = True SCREAMING_SNAKE_CASE : Optional[int] = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.inprogress_constraint.update(_lowerCamelCase ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : Optional[int] = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) SCREAMING_SNAKE_CASE : str = None if len(self.pending_constraints ) == 0: # we're done! SCREAMING_SNAKE_CASE : Optional[Any] = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(_lowerCamelCase ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = pending_constraint.update(_lowerCamelCase ) if not stepped: raise Exception( '''`constraint.update(token_id)` is not yielding incremental progress, ''' '''even though `constraint.does_advance(token_id)` is true.''' ) if complete: self.complete_constraints.append(_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = None if not complete and stepped: SCREAMING_SNAKE_CASE : Optional[Any] = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". SCREAMING_SNAKE_CASE : Union[str, Any] = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. SCREAMING_SNAKE_CASE : str = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def __lowerCAmelCase ( self , _lowerCamelCase=True ) ->str: SCREAMING_SNAKE_CASE : Dict = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: SCREAMING_SNAKE_CASE : str = [ constraint.copy(stateful=_lowerCamelCase ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: SCREAMING_SNAKE_CASE : Optional[int] = self.inprogress_constraint.copy(stateful=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = [constraint.copy() for constraint in self.pending_constraints] return new_state
313
0
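The disjunctive-constraint row above stores candidate token sequences in a trie of nested dicts so the allowed next tokens for any prefix are one chain of lookups away. A stripped-down sketch of that structure (not the file's class API, just the idea):

def build_trie(sequences):
    root = {}
    for seq in sequences:
        level = root
        for token_id in seq:
            level = level.setdefault(token_id, {})  # descend, creating nodes as needed
    return root

def next_tokens(trie, prefix):
    level = trie
    for token_id in prefix:
        level = level[token_id]  # KeyError means the prefix is not in the trie
    return list(level.keys())

trie = build_trie([[5, 6, 7], [5, 8]])
assert sorted(next_tokens(trie, [5])) == [6, 8]
assert next_tokens(trie, [5, 6, 7]) == []  # an empty dict marks a leaf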
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) lowercase__ = { '''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''], '''processing_layoutlmv2''': ['''LayoutLMv2Processor'''], '''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = ['''LayoutLMv2TokenizerFast'''] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = ['''LayoutLMv2FeatureExtractor'''] lowercase__ = ['''LayoutLMv2ImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ '''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LayoutLMv2ForQuestionAnswering''', '''LayoutLMv2ForSequenceClassification''', '''LayoutLMv2ForTokenClassification''', '''LayoutLMv2Layer''', '''LayoutLMv2Model''', '''LayoutLMv2PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
96
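The __init__ row above is the transformers lazy-import scaffold: exported names are declared up front and the defining submodule is imported only on first attribute access. A toy approximation of that mechanism, using a stdlib module as the lazily imported target:

import importlib
import types

class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the module that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol):
        module_name = self._symbol_to_module[symbol]  # unknown names raise KeyError in this toy
        value = getattr(importlib.import_module(module_name), symbol)
        setattr(self, symbol, value)                  # cache so __getattr__ runs only once per name
        return value

lazy = ToyLazyModule("demo", {"collections": ["OrderedDict"]})
od_cls = lazy.OrderedDict  # the import happens here, not at construction time
assert od_cls([("a", 1)])["a"] == 1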
import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def UpperCAmelCase_( a__=32 , a__=10 , a__=100 , a__=1_026 , a__=True , a__="data/tokenized_stories_train_wikitext103.jbl" , a__="igf_context_pairs.jbl" , ): """simple docstring""" set_seed(3 ) # generate train_data and objective_set SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = generate_datasets( a__ , a__ , number=a__ , min_len=1_026 , trim=a__ ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? SCREAMING_SNAKE_CASE : str = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' ) # load pretrained model SCREAMING_SNAKE_CASE : Dict = load_gpta('''gpt2''' ).to(a__ ) print('''computing perplexity on objective set''' ) SCREAMING_SNAKE_CASE : int = compute_perplexity(a__ , a__ , a__ ).item() print('''perplexity on objective set:''' , a__ ) # collect igf pairs and save to file demo.jbl collect_objective_set(a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def UpperCAmelCase_( a__ , a__=15 , a__=128 , a__=100 , a__="igf_model.pt" , ): """simple docstring""" set_seed(42 ) # Load pre-trained model SCREAMING_SNAKE_CASE : List[Any] = GPTaLMHeadModel.from_pretrained('''gpt2''' ) # Initialize secondary learner to use embedding weights of model SCREAMING_SNAKE_CASE : str = SecondaryLearner(a__ ) # Train secondary learner SCREAMING_SNAKE_CASE : Union[str, Any] = train_secondary_learner( a__ , a__ , max_epochs=a__ , batch_size=a__ , eval_freq=100 , igf_model_path=a__ , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def UpperCAmelCase_( a__ , a__ , a__ , a__=32 , a__=1_000 , a__=16 , a__=1.0 , a__=recopy_gpta , a__=None , a__=10 , a__="gpt2_finetuned.pt" , ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' ) SCREAMING_SNAKE_CASE : Optional[int] = RandomSampler(a__ ) SCREAMING_SNAKE_CASE : Dict = DataLoader(a__ , sampler=a__ ) SCREAMING_SNAKE_CASE : Tuple = max_steps // (len(a__ )) + 1 SCREAMING_SNAKE_CASE : int = 0 SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros((1, context_len) , dtype=torch.long , device=a__ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = recopy_model(a__ , a__ , a__ ) model.train() if secondary_learner is not None: secondary_learner.to(a__ ) secondary_learner.eval() SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Optional[int] = 0 SCREAMING_SNAKE_CASE : Optional[int] = [] SCREAMING_SNAKE_CASE : Tuple = [] # Compute the performance of the transformer model at the beginning SCREAMING_SNAKE_CASE : str = compute_perplexity(a__ , a__ , a__ ) test_perps.append(a__ ) print('''Test perplexity, step''' , a__ , ''':''' , a__ ) for epoch in range(int(a__ ) ): for step, example in enumerate(a__ ): torch.cuda.empty_cache() SCREAMING_SNAKE_CASE : Union[str, Any] = random.randint(0 , example.size(2 ) - context_len - 1 ) SCREAMING_SNAKE_CASE : Optional[int] = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() 
SCREAMING_SNAKE_CASE : Optional[Any] = model(a__ , labels=a__ ) SCREAMING_SNAKE_CASE : Union[str, Any] = True if secondary_learner is not None: SCREAMING_SNAKE_CASE : List[str] = secondary_learner.forward( torch.tensor(a__ , dtype=torch.long , device=a__ ).unsqueeze(0 ) )[0].item() observed_qs.append(float(a__ ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: SCREAMING_SNAKE_CASE : Dict = -1 if predicted_q < threshold: SCREAMING_SNAKE_CASE : str = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) SCREAMING_SNAKE_CASE : List[str] = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() SCREAMING_SNAKE_CASE : Any = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: SCREAMING_SNAKE_CASE : str = compute_perplexity(a__ , a__ , a__ ) test_perps.append(a__ ) print('''Test perplexity, step''' , a__ , ''':''' , a__ ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , a__ ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def UpperCAmelCase_( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' ) # Required parameters parser.add_argument( '''--data_dir''' , default=a__ , type=a__ , required=a__ , help='''The input data dir. Should contain data files for WikiText.''' , ) parser.add_argument( '''--model_name_or_path''' , default=a__ , type=a__ , required=a__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--data_file''' , type=a__ , default=a__ , help=( '''A jbl file containing tokenized data which can be split as objective dataset, ''' '''train_dataset and test_dataset.''' ) , ) parser.add_argument( '''--igf_data_file''' , type=a__ , default=a__ , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , ) parser.add_argument( '''--output_dir''' , default=a__ , type=a__ , required=a__ , help='''The output directory where the final fine-tuned model is stored.''' , ) parser.add_argument( '''--tokenizer_name''' , default=a__ , type=a__ , help='''Pretrained tokenizer name or path if not the same as model_name''' , ) parser.add_argument('''--seed''' , type=a__ , default=a__ , help='''A seed for reproducible training.''' ) parser.add_argument( '''--context_len''' , default=32 , type=a__ , help=( '''The maximum total input sequence length after tokenization. 
Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--size_objective_set''' , default=100 , type=a__ , help='''number of articles that are long enough to be used as our objective set''' , ) parser.add_argument( '''--eval_freq''' , default=100 , type=a__ , help='''secondary model evaluation is triggered at eval_freq''' ) parser.add_argument('''--max_steps''' , default=1_000 , type=a__ , help='''To calculate training epochs''' ) parser.add_argument( '''--secondary_learner_batch_size''' , default=128 , type=a__ , help='''batch size of training data for secondary learner''' , ) parser.add_argument( '''--batch_size''' , default=16 , type=a__ , help='''batch size of training data of language model(gpt2) ''' ) parser.add_argument( '''--eval_interval''' , default=10 , type=a__ , help=( '''decay the selectivity of our secondary learner filter from''' '''1 standard deviation above average to 1 below average after 10 batches''' ) , ) parser.add_argument( '''--number''' , default=100 , type=a__ , help='''The number of examples split to be used as objective_set/test_data''' ) parser.add_argument( '''--min_len''' , default=1_026 , type=a__ , help='''The minimum length of the article to be used as objective set''' ) parser.add_argument( '''--secondary_learner_max_epochs''' , default=15 , type=a__ , help='''number of epochs to train secondary learner''' ) parser.add_argument('''--trim''' , default=a__ , type=a__ , help='''truncate the example if it exceeds context length''' ) parser.add_argument( '''--threshold''' , default=1.0 , type=a__ , help=( '''The threshold value used by secondary learner to filter the train_data and allow only''' ''' informative data as input to the model''' ) , ) parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=a__ , help='''finetuned_model_name''' ) parser.add_argument( '''--recopy_model''' , default=a__ , type=a__ , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=a__ , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , ) # Load train data for secondary learner SCREAMING_SNAKE_CASE : List[Any] = joblib.load('''data/IGF_values.jbl''' ) # Train secondary learner SCREAMING_SNAKE_CASE : Tuple = training_secondary_learner( a__ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='''igf_model.pt''' , ) # load pretrained gpt2 model SCREAMING_SNAKE_CASE : Optional[Any] = GPTaLMHeadModel.from_pretrained('''gpt2''' ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = generate_datasets( context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=100 , min_len=1_026 , trim=a__ ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( a__ , a__ , a__ , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=a__ , secondary_learner=a__ , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , ) if __name__ == "__main__": main()
313
0
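The IGF script row above repeatedly calls compute_perplexity on an objective set. Perplexity is just the exponential of the mean token-level cross-entropy, which a causal LM returns as its loss when given labels; a minimal sketch (downloads the small gpt2 checkpoint, text is illustrative):

import torch
from transformers import GPT2LMHeadModel, GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2").eval()

text = "Information gain filtration selects informative training batches."
inputs = tokenizer(text, return_tensors="pt")

with torch.no_grad():
    # passing labels=input_ids makes the model return mean cross-entropy over tokens
    loss = model(**inputs, labels=inputs["input_ids"]).loss

perplexity = torch.exp(loss).item()
print(f"perplexity: {perplexity:.2f}")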
'''simple docstring''' import os import zipfile import pytest from datasets.utils.extract import ( BzipaExtractor, Extractor, GzipExtractor, LzaExtractor, SevenZipExtractor, TarExtractor, XzExtractor, ZipExtractor, ZstdExtractor, ) from .utils import require_lza, require_pyazr, require_zstandard @pytest.mark.parametrize( """compression_format, is_archive""" , [ ("""7z""", True), ("""bz2""", False), ("""gzip""", False), ("""lz4""", False), ("""tar""", True), ("""xz""", False), ("""zip""", True), ("""zstd""", False), ] , ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] , ): '''simple docstring''' UpperCAmelCase__ = { '''7z''': (seven_zip_file, SevenZipExtractor), '''bz2''': (bza_file, BzipaExtractor), '''gzip''': (gz_file, GzipExtractor), '''lz4''': (lza_file, LzaExtractor), '''tar''': (tar_file, TarExtractor), '''xz''': (xz_file, XzExtractor), '''zip''': (zip_file, ZipExtractor), '''zstd''': (zstd_file, ZstdExtractor), } UpperCAmelCase__ = input_paths_and_base_extractors[compression_format] if input_path is None: UpperCAmelCase__ = F'''for \'{compression_format}\' compression_format, ''' if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(a__ ) assert base_extractor.is_extractable(a__ ) UpperCAmelCase__ = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''') base_extractor.extract(a__ , a__ ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name UpperCAmelCase__ = file_path.read_text(encoding="""utf-8""" ) else: UpperCAmelCase__ = output_path.read_text(encoding="""utf-8""" ) UpperCAmelCase__ = text_file.read_text(encoding="""utf-8""" ) assert extracted_file_content == expected_file_content @pytest.mark.parametrize( """compression_format, is_archive""" , [ ("""7z""", True), ("""bz2""", False), ("""gzip""", False), ("""lz4""", False), ("""tar""", True), ("""xz""", False), ("""zip""", True), ("""zstd""", False), ] , ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any , ): '''simple docstring''' UpperCAmelCase__ = { '''7z''': seven_zip_file, '''bz2''': bza_file, '''gzip''': gz_file, '''lz4''': lza_file, '''tar''': tar_file, '''xz''': xz_file, '''zip''': zip_file, '''zstd''': zstd_file, } UpperCAmelCase__ = input_paths[compression_format] if input_path is None: UpperCAmelCase__ = F'''for \'{compression_format}\' compression_format, ''' if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(a__ ) UpperCAmelCase__ = 
Extractor.infer_extractor_format(a__ ) assert extractor_format is not None UpperCAmelCase__ = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''') Extractor.extract(a__ , a__ , a__ ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name UpperCAmelCase__ = file_path.read_text(encoding="""utf-8""" ) else: UpperCAmelCase__ = output_path.read_text(encoding="""utf-8""" ) UpperCAmelCase__ = text_file.read_text(encoding="""utf-8""" ) assert extracted_file_content == expected_file_content @pytest.fixture def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] ): '''simple docstring''' import tarfile UpperCAmelCase__ = tmp_path / '''data_dot_dot''' directory.mkdir() UpperCAmelCase__ = directory / '''tar_file_with_dot_dot.tar''' with tarfile.TarFile(a__ , """w""" ) as f: f.add(a__ , arcname=os.path.join("""..""" , text_file.name ) ) return path @pytest.fixture def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Tuple ): '''simple docstring''' import tarfile UpperCAmelCase__ = tmp_path / '''data_sym_link''' directory.mkdir() UpperCAmelCase__ = directory / '''tar_file_with_sym_link.tar''' os.symlink("""..""" , directory / """subdir""" , target_is_directory=a__ ) with tarfile.TarFile(a__ , """w""" ) as f: f.add(str(directory / """subdir""" ) , arcname="""subdir""" ) # str required by os.readlink on Windows and Python < 3.8 return path @pytest.mark.parametrize( """insecure_tar_file, error_log""" , [("""tar_file_with_dot_dot""", """illegal path"""), ("""tar_file_with_sym_link""", """Symlink""")] , ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] ): '''simple docstring''' UpperCAmelCase__ = { '''tar_file_with_dot_dot''': tar_file_with_dot_dot, '''tar_file_with_sym_link''': tar_file_with_sym_link, } UpperCAmelCase__ = insecure_tar_files[insecure_tar_file] UpperCAmelCase__ = tmp_path / '''extracted''' TarExtractor.extract(a__ , a__ ) assert caplog.text for record in caplog.records: assert record.levelname == "ERROR" assert error_log in record.msg def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[Any] ): '''simple docstring''' UpperCAmelCase__ = tmpdir / '''not_a_zip_file''' # From: https://github.com/python/cpython/pull/5053 UpperCAmelCase__ = ( b'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00''' b'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I''' b'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07''' b'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82''' ) with not_a_zip_file.open("""wb""" ) as f: f.write(a__ ) assert zipfile.is_zipfile(str(a__ ) ) # is a false positive for `zipfile` assert not ZipExtractor.is_extractable(a__ ) # but we're right
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only

from utils_rag import save_json


def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
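# Usage sketch: wiring the callbacks above into a Lightning trainer. The
# LightningModule is a hypothetical stand-in; only the helpers defined above
# and the standard `pl.Trainer(callbacks=...)` argument are assumed.
import pytorch_lightning as pl

checkpoint = get_checkpoint_callback(output_dir="outputs", metric="em")
early_stopping = get_early_stopping_callback(metric="em", patience=3)
trainer = pl.Trainer(callbacks=[checkpoint, early_stopping, Seq2SeqLoggingCallback()])
# trainer.fit(my_module)  # `my_module` would be the RAG LightningModule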
import argparse
import os

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between `start_prompt` and `end_prompt`, plus its indices and all lines."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}


def get_model_list_for_task(task_guide):
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
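# Usage sketch: besides `python utils/check_task_guides.py [--fix_and_overwrite]`,
# the check can be invoked programmatically from the repo root; it raises if a
# guide's auto-generated model list is stale.
check_model_list_for_task("summarization.md", overwrite=False)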
import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def UpperCAmelCase_( a__ ): """simple docstring""" if ( (cp >= 0x4_E00 and cp <= 0x9_FFF) or (cp >= 0x3_400 and cp <= 0x4_DBF) # or (cp >= 0x20_000 and cp <= 0x2A_6DF) # or (cp >= 0x2A_700 and cp <= 0x2B_73F) # or (cp >= 0x2B_740 and cp <= 0x2B_81F) # or (cp >= 0x2B_820 and cp <= 0x2C_EAF) # or (cp >= 0xF_900 and cp <= 0xF_AFF) or (cp >= 0x2F_800 and cp <= 0x2F_A1F) # ): # return True return False def UpperCAmelCase_( a__ ): """simple docstring""" for char in word: SCREAMING_SNAKE_CASE : str = ord(a__ ) if not _is_chinese_char(a__ ): return 0 return 1 def UpperCAmelCase_( a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = set() for token in tokens: SCREAMING_SNAKE_CASE : str = len(a__ ) > 1 and is_chinese(a__ ) if chinese_word: word_set.add(a__ ) SCREAMING_SNAKE_CASE : str = list(a__ ) return word_list def UpperCAmelCase_( a__ , a__ ): """simple docstring""" if not chinese_word_set: return bert_tokens SCREAMING_SNAKE_CASE : List[str] = max([len(a__ ) for w in chinese_word_set] ) SCREAMING_SNAKE_CASE : Tuple = bert_tokens SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = 0, len(a__ ) while start < end: SCREAMING_SNAKE_CASE : Dict = True if is_chinese(bert_word[start] ): SCREAMING_SNAKE_CASE : Optional[int] = min(end - start , a__ ) for i in range(a__ , 1 , -1 ): SCREAMING_SNAKE_CASE : Optional[int] = ''''''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): SCREAMING_SNAKE_CASE : Optional[int] = '''##''' + bert_word[j] SCREAMING_SNAKE_CASE : List[str] = start + i SCREAMING_SNAKE_CASE : Optional[Any] = False break if single_word: start += 1 return bert_word def UpperCAmelCase_( a__ , a__ , a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = [] for i in range(0 , len(a__ ) , 100 ): SCREAMING_SNAKE_CASE : Optional[Any] = ltp_tokenizer.seg(lines[i : i + 100] )[0] SCREAMING_SNAKE_CASE : Union[str, Any] = [get_chinese_word(a__ ) for r in res] ltp_res.extend(a__ ) assert len(a__ ) == len(a__ ) SCREAMING_SNAKE_CASE : Any = [] for i in range(0 , len(a__ ) , 100 ): SCREAMING_SNAKE_CASE : int = bert_tokenizer(lines[i : i + 100] , add_special_tokens=a__ , truncation=a__ , max_length=512 ) bert_res.extend(res['''input_ids'''] ) assert len(a__ ) == len(a__ ) SCREAMING_SNAKE_CASE : int = [] for input_ids, chinese_word in zip(a__ , a__ ): SCREAMING_SNAKE_CASE : List[Any] = [] for id in input_ids: SCREAMING_SNAKE_CASE : List[Any] = bert_tokenizer._convert_id_to_token(a__ ) input_tokens.append(a__ ) SCREAMING_SNAKE_CASE : List[str] = add_sub_symbol(a__ , a__ ) SCREAMING_SNAKE_CASE : Dict = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(a__ ): if token[:2] == "##": SCREAMING_SNAKE_CASE : Optional[int] = token[2:] # save chinese tokens' pos if len(a__ ) == 1 and _is_chinese_char(ord(a__ ) ): ref_id.append(a__ ) ref_ids.append(a__ ) assert len(a__ ) == len(a__ ) return ref_ids def UpperCAmelCase_( a__ ): """simple docstring""" with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f: SCREAMING_SNAKE_CASE : List[str] = f.readlines() SCREAMING_SNAKE_CASE : Union[str, Any] = [line.strip() for line in data if len(a__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' SCREAMING_SNAKE_CASE : List[str] = LTP(args.ltp ) # faster in GPU device SCREAMING_SNAKE_CASE : int = BertTokenizer.from_pretrained(args.bert ) SCREAMING_SNAKE_CASE : int = prepare_ref(a__ , a__ , a__ ) with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f: SCREAMING_SNAKE_CASE : Tuple = [json.dumps(a__ ) + '''\n''' for ref in ref_ids] f.writelines(a__ ) if __name__ == "__main__": a__ : int = argparse.ArgumentParser(description='''prepare_chinese_ref''') parser.add_argument( '''--file_name''', type=str, default='''./resources/chinese-demo.txt''', help='''file need process, same as training data in lm''', ) parser.add_argument( '''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path''' ) parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''') parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''') a__ : int = parser.parse_args() main(args)
import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever __UpperCAmelCase = logging.getLogger(__name__) class UpperCamelCase__ ( a__ ): """simple docstring""" def __init__( self , _A , _A , _A , _A=None ) -> Optional[int]: super().__init__( _lowerCamelCase , question_encoder_tokenizer=_lowerCamelCase , generator_tokenizer=_lowerCamelCase , index=_lowerCamelCase , init_retrieval=_lowerCamelCase , ) SCREAMING_SNAKE_CASE_ = None def _UpperCamelCase ( self , _A ) -> Optional[Any]: logger.info('''initializing retrieval''' ) # initializing a separate process group for retrieval as the default # nccl backend doesn't support gather/scatter operations while gloo # is too slow to replace nccl for the core gpu communication if dist.is_initialized(): logger.info('''dist initialized''' ) # needs to be set manually SCREAMING_SNAKE_CASE_ = self._infer_socket_ifname() # avoid clash with the NCCL port SCREAMING_SNAKE_CASE_ = str(distributed_port + 1 ) SCREAMING_SNAKE_CASE_ = dist.new_group(ranks=_lowerCamelCase , backend='''gloo''' ) # initialize retriever only on the main worker if not dist.is_initialized() or self._is_main(): logger.info('''dist not initialized / main''' ) self.index.init_index() # all processes wait untill the retriever is initialized by the main process if dist.is_initialized(): torch.distributed.barrier(group=self.process_group ) def _UpperCamelCase ( self ) -> List[Any]: return dist.get_rank(group=self.process_group ) == 0 def _UpperCamelCase ( self , _A , _A , _A=torch.floataa ) -> List[Any]: SCREAMING_SNAKE_CASE_ = torch.empty(_lowerCamelCase , dtype=_lowerCamelCase ) dist.scatter(_lowerCamelCase , src=0 , scatter_list=_lowerCamelCase , group=self.process_group ) return target_tensor def _UpperCamelCase ( self ) -> Optional[Any]: SCREAMING_SNAKE_CASE_ = psutil.net_if_addrs() # a hacky way to deal with varying network interface names SCREAMING_SNAKE_CASE_ = next((addr for addr in addrs if addr.startswith('''e''' )) , _lowerCamelCase ) return ifname def _UpperCamelCase ( self , _A , _A ) -> Tuple[np.ndarray, List[dict]]: # single GPU training if not dist.is_initialized(): SCREAMING_SNAKE_CASE_ = self._main_retrieve(_lowerCamelCase , _lowerCamelCase ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(_lowerCamelCase ) # distributed training SCREAMING_SNAKE_CASE_ = dist.get_world_size(group=self.process_group ) # gather logic SCREAMING_SNAKE_CASE_ = None if self._is_main(): SCREAMING_SNAKE_CASE_ = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(_lowerCamelCase )] dist.gather(torch.tensor(_lowerCamelCase ) , dst=0 , gather_list=_lowerCamelCase , group=self.process_group ) # scatter logic SCREAMING_SNAKE_CASE_ = question_hidden_states.shape[0] SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [] if self._is_main(): assert len(_lowerCamelCase ) == world_size SCREAMING_SNAKE_CASE_ = self._main_retrieve(torch.cat(_lowerCamelCase ).numpy() , _lowerCamelCase ) SCREAMING_SNAKE_CASE_ = torch.tensor(_lowerCamelCase ), torch.tensor(_lowerCamelCase ) SCREAMING_SNAKE_CASE_ = self._chunk_tensor(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE_ = self._chunk_tensor(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE_ = self._scattered(_lowerCamelCase , [n_queries, n_docs] , target_type=torch.intaa ) SCREAMING_SNAKE_CASE_ = self._scattered(_lowerCamelCase , [n_queries, n_docs, question_hidden_states.shape[1]] ) return 
retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(_lowerCamelCase )
import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def UpperCAmelCase_( a__ , a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = F"""{sampling_rate}""" SCREAMING_SNAKE_CASE : Tuple = '''1''' SCREAMING_SNAKE_CASE : Union[str, Any] = '''f32le''' SCREAMING_SNAKE_CASE : List[Any] = [ '''ffmpeg''', '''-i''', '''pipe:0''', '''-ac''', ac, '''-ar''', ar, '''-f''', format_for_conversion, '''-hide_banner''', '''-loglevel''', '''quiet''', '''pipe:1''', ] try: with subprocess.Popen(a__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: SCREAMING_SNAKE_CASE : Tuple = ffmpeg_process.communicate(a__ ) except FileNotFoundError as error: raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error SCREAMING_SNAKE_CASE : Optional[Any] = output_stream[0] SCREAMING_SNAKE_CASE : Any = np.frombuffer(a__ , np.floataa ) if audio.shape[0] == 0: raise ValueError('''Malformed soundfile''' ) return audio def UpperCAmelCase_( a__ , a__ , a__ = "f32le" , ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = F"""{sampling_rate}""" SCREAMING_SNAKE_CASE : Dict = '''1''' if format_for_conversion == "s16le": SCREAMING_SNAKE_CASE : List[Any] = 2 elif format_for_conversion == "f32le": SCREAMING_SNAKE_CASE : Dict = 4 else: raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = platform.system() if system == "Linux": SCREAMING_SNAKE_CASE : Dict = '''alsa''' SCREAMING_SNAKE_CASE : Any = '''default''' elif system == "Darwin": SCREAMING_SNAKE_CASE : Union[str, Any] = '''avfoundation''' SCREAMING_SNAKE_CASE : Optional[int] = ''':0''' elif system == "Windows": SCREAMING_SNAKE_CASE : int = '''dshow''' SCREAMING_SNAKE_CASE : Any = '''default''' SCREAMING_SNAKE_CASE : Union[str, Any] = [ '''ffmpeg''', '''-f''', format_, '''-i''', input_, '''-ac''', ac, '''-ar''', ar, '''-f''', format_for_conversion, '''-fflags''', '''nobuffer''', '''-hide_banner''', '''-loglevel''', '''quiet''', '''pipe:1''', ] SCREAMING_SNAKE_CASE : List[str] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample SCREAMING_SNAKE_CASE : List[Any] = _ffmpeg_stream(a__ , a__ ) for item in iterator: yield item def UpperCAmelCase_( a__ , a__ , a__ = None , a__ = None , a__ = "f32le" , ): """simple docstring""" if stream_chunk_s is not None: SCREAMING_SNAKE_CASE : Tuple = stream_chunk_s else: SCREAMING_SNAKE_CASE : List[str] = chunk_length_s SCREAMING_SNAKE_CASE : Union[str, Any] = ffmpeg_microphone(a__ , a__ , format_for_conversion=a__ ) if format_for_conversion == "s16le": SCREAMING_SNAKE_CASE : Optional[int] = np.intaa SCREAMING_SNAKE_CASE : List[Any] = 2 elif format_for_conversion == "f32le": SCREAMING_SNAKE_CASE : Any = np.floataa SCREAMING_SNAKE_CASE : Union[str, Any] = 4 else: raise ValueError(F"""Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`""" ) if stride_length_s is None: SCREAMING_SNAKE_CASE : Optional[Any] = chunk_length_s / 6 SCREAMING_SNAKE_CASE : Dict = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(a__ , (int, float) ): SCREAMING_SNAKE_CASE : List[Any] = [stride_length_s, stride_length_s] SCREAMING_SNAKE_CASE : Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample SCREAMING_SNAKE_CASE : int = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample SCREAMING_SNAKE_CASE : Union[str, Any] = datetime.datetime.now() SCREAMING_SNAKE_CASE : Dict = datetime.timedelta(seconds=a__ ) for item in chunk_bytes_iter(a__ , a__ , stride=(stride_left, stride_right) , stream=a__ ): # Put everything back in numpy scale SCREAMING_SNAKE_CASE : Dict = np.frombuffer(item['''raw'''] , dtype=a__ ) SCREAMING_SNAKE_CASE : Optional[Any] = ( item['''stride'''][0] // size_of_sample, item['''stride'''][1] // size_of_sample, ) SCREAMING_SNAKE_CASE : Any = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! SKIP continue yield item def UpperCAmelCase_( a__ , a__ , a__ , a__ = False ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = b'''''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = stride if stride_left + stride_right >= chunk_len: raise ValueError( F"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = 0 for raw in iterator: acc += raw if stream and len(a__ ) < chunk_len: SCREAMING_SNAKE_CASE : List[str] = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(a__ ) >= chunk_len: # We are flushing the accumulator SCREAMING_SNAKE_CASE : str = (_stride_left, stride_right) SCREAMING_SNAKE_CASE : List[str] = {'''raw''': acc[:chunk_len], '''stride''': stride} if stream: SCREAMING_SNAKE_CASE : List[str] = False yield item SCREAMING_SNAKE_CASE : Dict = stride_left SCREAMING_SNAKE_CASE : int = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(a__ ) > stride_left: SCREAMING_SNAKE_CASE : Optional[Any] = {'''raw''': acc, '''stride''': (_stride_left, 0)} if stream: SCREAMING_SNAKE_CASE : Union[str, Any] = False yield item def UpperCAmelCase_( a__ , a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = 2**24 # 16Mo try: with subprocess.Popen(a__ , stdout=subprocess.PIPE , bufsize=a__ ) as ffmpeg_process: while True: SCREAMING_SNAKE_CASE : str = ffmpeg_process.stdout.read(a__ ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Logistic sigmoid: 1 / (1 + e^(-x))."""
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Sigmoid approximation of GELU: x * sigmoid(1.702 * x)."""
    return vector * sigmoid(1.702 * vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
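# Quick numeric sanity check (sketch): the sigmoid approximation should track
# the exact GELU closely for small inputs.
import numpy as np

x = np.array([-1.0, 0.0, 1.0])
print(gaussian_error_linear_unit(x))  # ~[-0.154, 0.0, 0.846]; exact GELU gives ~[-0.159, 0.0, 0.841]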
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
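# Usage sketch: with the lazy `_import_structure` above, the submodules are
# only imported on first attribute access; a consumer simply writes (assuming
# torch is installed):
from transformers import WavLMConfig, WavLMModel

config = WavLMConfig()
model = WavLMModel(config)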
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase : Tuple =logging.get_logger(__name__) lowerCAmelCase : Optional[Any] ={'''vocab_file''': '''sentencepiece.bpe.model'''} lowerCAmelCase : str ={ '''vocab_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model''' ), }, } lowerCAmelCase : List[str] ={ '''moussaKam/mbarthez''': 1_024, '''moussaKam/barthez''': 1_024, '''moussaKam/barthez-orangesum-title''': 1_024, } lowerCAmelCase : List[str] ='''▁''' class a_ ( a__ ): __A = VOCAB_FILES_NAMES __A = PRETRAINED_VOCAB_FILES_MAP __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __A = ['input_ids', 'attention_mask'] def __init__( self : int , lowercase : Tuple , lowercase : Optional[int]="<s>" , lowercase : Optional[Any]="</s>" , lowercase : Dict="</s>" , lowercase : Any="<s>" , lowercase : int="<unk>" , lowercase : str="<pad>" , lowercase : str="<mask>" , lowercase : Union[str, Any] = None , **lowercase : Any , ): """simple docstring""" lowercase_ :Any = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token lowercase_ :Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , ) lowercase_ :Optional[Any] = vocab_file lowercase_ :Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_lowerCamelCase ) ) lowercase_ :List[str] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} lowercase_ :Union[str, Any] = len(self.sp_model ) - 1 lowercase_ :List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def lowercase__ ( self : Union[str, Any] , lowercase : Any , lowercase : Optional[Any] = None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase_ :Tuple = [self.cls_token_id] lowercase_ :List[str] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowercase__ ( self : Any , lowercase : List[str] , lowercase : List[Any] = None , lowercase : Optional[Any] = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCamelCase )) + [1] return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1] def lowercase__ ( self : List[Any] , lowercase : int , lowercase : Dict = None ): """simple docstring""" lowercase_ :List[str] = [self.sep_token_id] lowercase_ :str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowercase__ ( self : List[Any] ): 
"""simple docstring""" return len(self.sp_model ) def lowercase__ ( self : List[Any] ): """simple docstring""" lowercase_ :Any = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowercase__ ( self : int , lowercase : int ): """simple docstring""" return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase ) def lowercase__ ( self : int , lowercase : Union[str, Any] ): """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowercase_ :Dict = self.sp_model.PieceToId(_lowerCamelCase ) return spm_id if spm_id else self.unk_token_id def lowercase__ ( self : Dict , lowercase : Dict ): """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(_lowerCamelCase ) def lowercase__ ( self : Union[str, Any] , lowercase : int ): """simple docstring""" lowercase_ :Optional[Any] = [] lowercase_ :Dict = '''''' lowercase_ :str = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_lowerCamelCase ) + token lowercase_ :List[Any] = True lowercase_ :Dict = [] else: current_sub_tokens.append(_lowerCamelCase ) lowercase_ :List[Any] = False out_string += self.sp_model.decode(_lowerCamelCase ) return out_string.strip() def __getstate__( self : List[Any] ): """simple docstring""" lowercase_ :Tuple = self.__dict__.copy() lowercase_ :str = None return state def __setstate__( self : Optional[int] , lowercase : Any ): """simple docstring""" lowercase_ :List[str] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): lowercase_ :int = {} lowercase_ :Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowercase__ ( self : Any , lowercase : Dict , lowercase : Dict = None ): """simple docstring""" if not os.path.isdir(_lowerCamelCase ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return lowercase_ :Tuple = os.path.join( _lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCamelCase , "wb" ) as fi: lowercase_ :Union[str, Any] = self.sp_model.serialized_model_proto() fi.write(_lowerCamelCase ) return (out_vocab_file,)
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING a__ : int = logging.get_logger(__name__) a__ : Optional[Any] = { '''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''', # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class a_ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = 'deformable_detr' __SCREAMING_SNAKE_CASE : Union[str, Any] = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=3 , _lowerCamelCase=300 , _lowerCamelCase=1024 , _lowerCamelCase=6 , _lowerCamelCase=1024 , _lowerCamelCase=8 , _lowerCamelCase=6 , _lowerCamelCase=1024 , _lowerCamelCase=8 , _lowerCamelCase=0.0 , _lowerCamelCase=True , _lowerCamelCase="relu" , _lowerCamelCase=256 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1.0 , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase="sine" , _lowerCamelCase="resnet50" , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=False , _lowerCamelCase=300 , _lowerCamelCase=False , _lowerCamelCase=1 , _lowerCamelCase=5 , _lowerCamelCase=2 , _lowerCamelCase=1 , _lowerCamelCase=1 , _lowerCamelCase=5 , _lowerCamelCase=2 , _lowerCamelCase=0.1 , _lowerCamelCase=0.2_5 , _lowerCamelCase=False , **_lowerCamelCase , ) ->Optional[Any]: if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.''' ) SCREAMING_SNAKE_CASE : Dict = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): SCREAMING_SNAKE_CASE : List[Any] = backbone_config.get('''model_type''' ) SCREAMING_SNAKE_CASE : Optional[Any] = CONFIG_MAPPING[backbone_model_type] SCREAMING_SNAKE_CASE : int = config_class.from_dict(_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = use_timm_backbone SCREAMING_SNAKE_CASE : Optional[int] = backbone_config SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels SCREAMING_SNAKE_CASE : Optional[Any] = num_queries SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings SCREAMING_SNAKE_CASE : Optional[int] = d_model SCREAMING_SNAKE_CASE : str = encoder_ffn_dim SCREAMING_SNAKE_CASE : str = encoder_layers SCREAMING_SNAKE_CASE : str = encoder_attention_heads SCREAMING_SNAKE_CASE : Optional[int] = decoder_ffn_dim SCREAMING_SNAKE_CASE : int = decoder_layers SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_attention_heads SCREAMING_SNAKE_CASE : List[str] = dropout SCREAMING_SNAKE_CASE : Optional[int] = attention_dropout SCREAMING_SNAKE_CASE : str = activation_dropout SCREAMING_SNAKE_CASE : Optional[int] = activation_function SCREAMING_SNAKE_CASE : Optional[int] = init_std SCREAMING_SNAKE_CASE : List[str] = init_xavier_std SCREAMING_SNAKE_CASE : Optional[Any] = encoder_layerdrop SCREAMING_SNAKE_CASE : Union[str, Any] = auxiliary_loss SCREAMING_SNAKE_CASE : List[Any] = position_embedding_type SCREAMING_SNAKE_CASE : str = backbone SCREAMING_SNAKE_CASE : Dict = use_pretrained_backbone SCREAMING_SNAKE_CASE : Dict = dilation # deformable attributes SCREAMING_SNAKE_CASE : str = num_feature_levels SCREAMING_SNAKE_CASE : Optional[Any] = encoder_n_points SCREAMING_SNAKE_CASE : Any = decoder_n_points SCREAMING_SNAKE_CASE : str = two_stage SCREAMING_SNAKE_CASE : List[str] = two_stage_num_proposals SCREAMING_SNAKE_CASE : Dict = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher SCREAMING_SNAKE_CASE : int = class_cost SCREAMING_SNAKE_CASE : Union[str, Any] = bbox_cost SCREAMING_SNAKE_CASE : Optional[int] = giou_cost # Loss coefficients SCREAMING_SNAKE_CASE : Dict = mask_loss_coefficient SCREAMING_SNAKE_CASE : Union[str, Any] = dice_loss_coefficient SCREAMING_SNAKE_CASE : str = bbox_loss_coefficient SCREAMING_SNAKE_CASE : Tuple = giou_loss_coefficient SCREAMING_SNAKE_CASE : Optional[int] = eos_coefficient SCREAMING_SNAKE_CASE : Tuple = focal_alpha SCREAMING_SNAKE_CASE : Optional[int] = disable_custom_kernels super().__init__(is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase ) @property def __lowerCAmelCase ( self ) ->int: return self.encoder_attention_heads @property def __lowerCAmelCase ( self ) ->int: return self.d_model def __lowerCAmelCase ( self ) ->Any: SCREAMING_SNAKE_CASE : str = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: SCREAMING_SNAKE_CASE : Optional[int] = self.backbone_config.to_dict() SCREAMING_SNAKE_CASE : Any = self.__class__.model_type return output
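# Usage sketch: the class above corresponds to transformers' DeformableDetrConfig.
# A minimal instantiation with a few non-default deformable-attention settings
# (note the guard above: `two_stage=True` requires `with_box_refine=True`):
from transformers import DeformableDetrConfig

config = DeformableDetrConfig(num_queries=300, two_stage=True, with_box_refine=True)
print(config.num_attention_heads, config.hidden_size)  # attribute_map -> encoder_attention_heads / d_model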
"""simple docstring""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class a__ : def __init__( self : Any, lowerCAmelCase : Tuple, lowerCAmelCase : int=3, lowerCAmelCase : Union[str, Any]=32, lowerCAmelCase : List[str]=3, lowerCAmelCase : Dict=10, lowerCAmelCase : List[str]=[10, 20, 30, 40], lowerCAmelCase : str=[1, 1, 2, 1], lowerCAmelCase : Tuple=True, lowerCAmelCase : Union[str, Any]=True, lowerCAmelCase : Optional[int]="relu", lowerCAmelCase : List[Any]=3, lowerCAmelCase : int=None, ) -> Tuple: lowercase : Dict = parent lowercase : Optional[Any] = batch_size lowercase : Tuple = image_size lowercase : Optional[int] = num_channels lowercase : Dict = embeddings_size lowercase : Optional[Any] = hidden_sizes lowercase : str = depths lowercase : List[str] = is_training lowercase : str = use_labels lowercase : Any = hidden_act lowercase : Optional[Any] = num_labels lowercase : Any = scope lowercase : Union[str, Any] = len(_lowerCamelCase ) def lowercase ( self : Optional[Any] ) -> Optional[Any]: lowercase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase : str = None if self.use_labels: lowercase : int = ids_tensor([self.batch_size], self.num_labels ) lowercase : Union[str, Any] = self.get_config() return config, pixel_values, labels def lowercase ( self : List[str] ) -> List[str]: return ResNetConfig( num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, ) def lowercase ( self : Any, lowerCAmelCase : Any, lowerCAmelCase : List[str], lowerCAmelCase : int ) -> Union[str, Any]: lowercase : List[str] = TFResNetModel(config=_lowerCamelCase ) lowercase : Any = model(_lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), ) def lowercase ( self : Any, lowerCAmelCase : Optional[int], lowerCAmelCase : int, lowerCAmelCase : Dict ) -> Tuple: lowercase : Optional[Any] = self.num_labels lowercase : Tuple = TFResNetForImageClassification(_lowerCamelCase ) lowercase : Optional[Any] = model(_lowerCamelCase, labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def lowercase ( self : Any ) -> str: lowercase : Tuple = self.prepare_config_and_inputs() lowercase : Tuple = config_and_inputs lowercase : int = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class a__ ( a__, a__, unittest.TestCase ): _lowerCamelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () _lowerCamelCase = ( {'feature-extraction': TFResNetModel, 
'image-classification': TFResNetForImageClassification} if is_tf_available() else {} ) _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False def lowercase ( self : List[Any] ) -> List[Any]: lowercase : List[Any] = TFResNetModelTester(self ) lowercase : Optional[Any] = ConfigTester(self, config_class=_lowerCamelCase, has_text_modality=_lowerCamelCase ) def lowercase ( self : List[str] ) -> Optional[Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowercase ( self : Optional[Any] ) -> str: return @unittest.skip(reason='ResNet does not use inputs_embeds' ) def lowercase ( self : str ) -> List[str]: pass @unittest.skip(reason='ResNet does not support input and output embeddings' ) def lowercase ( self : Any ) -> Union[str, Any]: pass def lowercase ( self : Optional[Any] ) -> str: lowercase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase : int = model_class(_lowerCamelCase ) lowercase : Optional[int] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase : Dict = [*signature.parameters.keys()] lowercase : List[str] = ['''pixel_values'''] self.assertListEqual(arg_names[:1], _lowerCamelCase ) def lowercase ( self : List[Any] ) -> List[Any]: lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def lowercase ( self : Union[str, Any] ) -> Optional[Any]: def check_hidden_states_output(lowerCAmelCase : Optional[int], lowerCAmelCase : Any, lowerCAmelCase : List[str] ): lowercase : List[str] = model_class(_lowerCamelCase ) lowercase : Any = model(**self._prepare_for_class(_lowerCamelCase, _lowerCamelCase ) ) lowercase : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase : Tuple = self.model_tester.num_stages self.assertEqual(len(_lowerCamelCase ), expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowercase : Optional[int] = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: lowercase : List[str] = layer_type lowercase : str = True check_hidden_states_output(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase : Optional[int] = True check_hidden_states_output(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) def lowercase ( self : Optional[int] ) -> int: lowercase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) @slow def lowercase ( self : Dict ) -> List[Any]: for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : Optional[Any] = TFResNetModel.from_pretrained(_lowerCamelCase ) 
self.assertIsNotNone(_lowerCamelCase ) def lowercase__ ( ) -> Dict: '''simple docstring''' lowercase : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class a__ ( unittest.TestCase ): @cached_property def lowercase ( self : Union[str, Any] ) -> Optional[int]: return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowercase ( self : List[Any] ) -> str: lowercase : int = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) lowercase : str = self.default_image_processor lowercase : Any = prepare_img() lowercase : Dict = image_processor(images=_lowerCamelCase, return_tensors='tf' ) # forward pass lowercase : Optional[int] = model(**_lowerCamelCase ) # verify the logits lowercase : int = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape, _lowerCamelCase ) lowercase : Tuple = tf.constant([-11.1069, -9.7877, -8.3777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), _lowerCamelCase, atol=1e-4 ) )
import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class a_ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = (EulerDiscreteScheduler,) __SCREAMING_SNAKE_CASE : Optional[int] = 10 def __lowerCAmelCase ( self , **_lowerCamelCase ) ->Tuple: SCREAMING_SNAKE_CASE : Optional[int] = { '''num_train_timesteps''': 1100, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', } config.update(**_lowerCamelCase ) return config def __lowerCAmelCase ( self ) ->Tuple: for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_lowerCamelCase ) def __lowerCAmelCase ( self ) ->Any: for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ): self.check_over_configs(beta_start=_lowerCamelCase , beta_end=_lowerCamelCase ) def __lowerCAmelCase ( self ) ->int: for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_lowerCamelCase ) def __lowerCAmelCase ( self ) ->Optional[Any]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_lowerCamelCase ) def __lowerCAmelCase ( self ) ->List[Any]: SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : int = self.get_scheduler_config() SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**_lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = self.dummy_model() SCREAMING_SNAKE_CASE : int = self.dummy_sample_deter * scheduler.init_noise_sigma SCREAMING_SNAKE_CASE : Any = sample.to(_lowerCamelCase ) for i, t in enumerate(scheduler.timesteps ): SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : List[str] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = output.prev_sample SCREAMING_SNAKE_CASE : List[Any] = torch.sum(torch.abs(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : List[Any] = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2 assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3 def __lowerCAmelCase ( self ) ->List[str]: SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config(prediction_type='''v_prediction''' ) SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**_lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : int = self.dummy_model() SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma SCREAMING_SNAKE_CASE : List[str] = sample.to(_lowerCamelCase ) for i, t in enumerate(scheduler.timesteps ): SCREAMING_SNAKE_CASE : str = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = output.prev_sample SCREAMING_SNAKE_CASE : str = torch.sum(torch.abs(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : int = torch.mean(torch.abs(_lowerCamelCase ) ) assert 
abs(result_sum.item() - 0.0_0_0_2 ) < 1e-2 assert abs(result_mean.item() - 2.2676e-06 ) < 1e-3 def __lowerCAmelCase ( self ) ->Tuple: SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config() SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**_lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps , device=_lowerCamelCase ) SCREAMING_SNAKE_CASE : int = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_model() SCREAMING_SNAKE_CASE : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(_lowerCamelCase ) for t in scheduler.timesteps: SCREAMING_SNAKE_CASE : Dict = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = output.prev_sample SCREAMING_SNAKE_CASE : List[Any] = torch.sum(torch.abs(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : str = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2 assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3 def __lowerCAmelCase ( self ) ->Optional[int]: SCREAMING_SNAKE_CASE : Dict = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**_lowerCamelCase , use_karras_sigmas=_lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps , device=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_model() SCREAMING_SNAKE_CASE : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() SCREAMING_SNAKE_CASE : int = sample.to(_lowerCamelCase ) for t in scheduler.timesteps: SCREAMING_SNAKE_CASE : List[Any] = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Dict = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ) SCREAMING_SNAKE_CASE : int = output.prev_sample SCREAMING_SNAKE_CASE : Optional[Any] = torch.sum(torch.abs(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : Any = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9 ) < 1e-2 assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3 ) < 1e-3
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class a ( a__, unittest.TestCase ): UpperCAmelCase_ : Optional[int] =BarthezTokenizer UpperCAmelCase_ : Dict =BarthezTokenizerFast UpperCAmelCase_ : str =True UpperCAmelCase_ : Union[str, Any] =True def UpperCamelCase_ ( self ): super().setUp() lowercase = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=_lowerCamelCase ) lowercase = tokenizer def UpperCamelCase_ ( self ): lowercase = '''<pad>''' lowercase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ) , _lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ) , _lowerCamelCase ) def UpperCamelCase_ ( self ): lowercase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(_lowerCamelCase ) , 1_0_1_1_2_2 ) def UpperCamelCase_ ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2 ) @require_torch def UpperCamelCase_ ( self ): lowercase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] lowercase = [0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2] lowercase = self.tokenizer( _lowerCamelCase , max_length=len(_lowerCamelCase ) , padding=_lowerCamelCase , truncation=_lowerCamelCase , return_tensors='pt' ) self.assertIsInstance(_lowerCamelCase , _lowerCamelCase ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) lowercase = batch.input_ids.tolist()[0] self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) def UpperCamelCase_ ( self ): if not self.test_rust_tokenizer: return lowercase = self.get_tokenizer() lowercase = self.get_rust_tokenizer() lowercase = '''I was born in 92000, and this is falsé.''' lowercase = tokenizer.tokenize(_lowerCamelCase ) lowercase = rust_tokenizer.tokenize(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) lowercase = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) lowercase = rust_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) lowercase = self.get_rust_tokenizer() lowercase = tokenizer.encode(_lowerCamelCase ) lowercase = rust_tokenizer.encode(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) @slow def UpperCamelCase_ ( self ): # fmt: off lowercase = {'''input_ids''': [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 
8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. lowercase = [ '''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ''' '''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''', '''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ''' '''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ''' '''telles que la traduction et la synthèse de texte.''', ] self.tokenizer_integration_test_util( expected_encoding=_lowerCamelCase , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=_lowerCamelCase , )
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer a__ : Dict = logging.get_logger(__name__) a__ : Dict = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} a__ : str = { '''vocab_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''', }, '''merges_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''', }, } a__ : Optional[int] = { '''allenai/led-base-16384''': 16_384, } class a_ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE : Union[str, Any] = LEDTokenizer __SCREAMING_SNAKE_CASE : Optional[int] = ['input_ids', 'attention_mask'] def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="replace" , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase=False , _lowerCamelCase=True , **_lowerCamelCase , ) ->Union[str, Any]: super().__init__( _lowerCamelCase , _lowerCamelCase , tokenizer_file=_lowerCamelCase , errors=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase , **_lowerCamelCase , ) SCREAMING_SNAKE_CASE : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , _lowerCamelCase ) != add_prefix_space: SCREAMING_SNAKE_CASE : str = getattr(_lowerCamelCase , pre_tok_state.pop('''type''' ) ) SCREAMING_SNAKE_CASE : Optional[int] = add_prefix_space SCREAMING_SNAKE_CASE : str = pre_tok_class(**_lowerCamelCase ) SCREAMING_SNAKE_CASE : Any = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` SCREAMING_SNAKE_CASE : List[Any] = '''post_processor''' SCREAMING_SNAKE_CASE : int = getattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase ) if tokenizer_component_instance: SCREAMING_SNAKE_CASE : Any = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: SCREAMING_SNAKE_CASE : Optional[int] = tuple(state['''sep'''] ) if "cls" in state: SCREAMING_SNAKE_CASE : Optional[Any] = tuple(state['''cls'''] ) SCREAMING_SNAKE_CASE : Any = False if state.get('''add_prefix_space''' , _lowerCamelCase ) != add_prefix_space: SCREAMING_SNAKE_CASE : Union[str, Any] = add_prefix_space SCREAMING_SNAKE_CASE : Union[str, Any] = True if state.get('''trim_offsets''' , _lowerCamelCase ) != trim_offsets: SCREAMING_SNAKE_CASE : List[Any] = trim_offsets SCREAMING_SNAKE_CASE : Union[str, Any] = True if 
changes_to_apply: SCREAMING_SNAKE_CASE : List[str] = getattr(_lowerCamelCase , state.pop('''type''' ) ) SCREAMING_SNAKE_CASE : List[Any] = component_class(**_lowerCamelCase ) setattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def __lowerCAmelCase ( self ) ->str: if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[Any]: SCREAMING_SNAKE_CASE : str = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else value SCREAMING_SNAKE_CASE : List[Any] = value def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->BatchEncoding: SCREAMING_SNAKE_CASE : Tuple = kwargs.get('''is_split_into_words''' , _lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ '''to use it with pretokenized inputs.''' ) return super()._batch_encode_plus(*_lowerCamelCase , **_lowerCamelCase ) def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->BatchEncoding: SCREAMING_SNAKE_CASE : List[Any] = kwargs.get('''is_split_into_words''' , _lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ '''to use it with pretokenized inputs.''' ) return super()._encode_plus(*_lowerCamelCase , **_lowerCamelCase ) def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->Tuple[str]: SCREAMING_SNAKE_CASE : Any = self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase ) return tuple(_lowerCamelCase ) def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=None ) ->Any: SCREAMING_SNAKE_CASE : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->List[int]: SCREAMING_SNAKE_CASE : Any = [self.sep_token_id] SCREAMING_SNAKE_CASE : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = PaddingStrategy.DO_NOT_PAD , _lowerCamelCase = None , _lowerCamelCase = None , ) ->dict: SCREAMING_SNAKE_CASE : Tuple = super()._pad( encoded_inputs=_lowerCamelCase , max_length=_lowerCamelCase , padding_strategy=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: SCREAMING_SNAKE_CASE : Optional[Any] = '''attention_mask''' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: SCREAMING_SNAKE_CASE : int = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
SCREAMING_SNAKE_CASE : Tuple = len(encoded_inputs['''global_attention_mask'''] ) != len(_lowerCamelCase ) if needs_to_be_padded: SCREAMING_SNAKE_CASE : int = len(_lowerCamelCase ) - len(encoded_inputs['''global_attention_mask'''] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` SCREAMING_SNAKE_CASE : str = ( encoded_inputs['''global_attention_mask'''] + [-1] * difference ) elif self.padding_side == "left": SCREAMING_SNAKE_CASE : Optional[Any] = [-1] * difference + encoded_inputs[ '''global_attention_mask''' ] else: raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) ) return encoded_inputs
313
0
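A minimal standalone sketch of the LED-specific rule inside the `_pad` override above; `pad_global_attention_mask` is a hypothetical helper (not part of the transformers API) that pads with -1 because 0 already means "local attention" in a global attention mask and so cannot double as padding.

def pad_global_attention_mask(global_attention_mask, input_ids, padding_side="right"):
    # Pad with -1: in LED, 0 marks local attention, so it cannot mean "padding".
    difference = len(input_ids) - len(global_attention_mask)
    if difference <= 0:
        return global_attention_mask
    if padding_side == "right":
        return global_attention_mask + [-1] * difference
    if padding_side == "left":
        return [-1] * difference + global_attention_mask
    raise ValueError("Invalid padding strategy: " + str(padding_side))

print(pad_global_attention_mask([1, 0, 0], [5, 6, 7, 0, 0]))  # [1, 0, 0, -1, -1]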
'''simple docstring''' import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = ['''model.decoder.embed_positions.weights'''] def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" if "emb" in name: lowerCAmelCase__ : Dict = name.replace("""emb""" , """model.decoder.embed_tokens""" ) if "transformer" in name: lowerCAmelCase__ : int = name.replace("""transformer""" , """model.decoder""" ) if "cross_attention" in name: lowerCAmelCase__ : Optional[int] = name.replace("""cross_attention""" , """encoder_attn""" ) if "linear1" in name: lowerCAmelCase__ : Any = name.replace("""linear1""" , """fc1""" ) if "linear2" in name: lowerCAmelCase__ : Optional[int] = name.replace("""linear2""" , """fc2""" ) if "norm1" in name: lowerCAmelCase__ : List[str] = name.replace("""norm1""" , """self_attn_layer_norm""" ) if "norm_cross" in name: lowerCAmelCase__ : str = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" ) if "norm2" in name: lowerCAmelCase__ : Tuple = name.replace("""norm2""" , """final_layer_norm""" ) if "out_norm" in name: lowerCAmelCase__ : Tuple = name.replace("""out_norm""" , """model.decoder.layer_norm""" ) if "linears" in name: lowerCAmelCase__ : Dict = name.replace("""linears""" , """lm_heads""" ) if "condition_provider.conditioners.description.output_proj" in name: lowerCAmelCase__ : Any = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" ) return name def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Optional[int] = list(state_dict.keys() ) lowerCAmelCase__ : Any = {} for key in keys: lowerCAmelCase__ : List[Any] = state_dict.pop(a__ ) lowerCAmelCase__ : Tuple = rename_keys(a__ ) if "in_proj_weight" in key: # split fused qkv proj lowerCAmelCase__ : List[Any] = val[:hidden_size, :] lowerCAmelCase__ : List[str] = val[hidden_size : 2 * hidden_size, :] lowerCAmelCase__ : str = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: lowerCAmelCase__ : int = val else: lowerCAmelCase__ : str = val return state_dict, enc_dec_proj_state_dict def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" if checkpoint == "small": # default config values lowerCAmelCase__ : Union[str, Any] = 1024 lowerCAmelCase__ : Dict = 24 lowerCAmelCase__ : Dict = 16 elif checkpoint == "medium": lowerCAmelCase__ : List[Any] = 1536 lowerCAmelCase__ : Union[str, Any] = 48 lowerCAmelCase__ : List[Any] = 24 elif checkpoint == "large": lowerCAmelCase__ : Dict = 2048 lowerCAmelCase__ : List[str] = 48 lowerCAmelCase__ : Optional[Any] = 32 else: raise ValueError(f"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" ) lowerCAmelCase__ : Tuple = MusicgenDecoderConfig( hidden_size=a__ , ffn_dim=hidden_size * 4 , num_hidden_layers=a__ , num_attention_heads=a__ , ) return config @torch.no_grad() def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase="cpu" ): """simple docstring""" lowerCAmelCase__ : List[Any] = MusicGen.get_pretrained(a__ , device=a__ ) 
lowerCAmelCase__ : str = decoder_config_from_checkpoint(a__ ) lowerCAmelCase__ : List[str] = fairseq_model.lm.state_dict() lowerCAmelCase__ : Any = rename_state_dict( a__ , hidden_size=decoder_config.hidden_size ) lowerCAmelCase__ : int = TaEncoderModel.from_pretrained("""t5-base""" ) lowerCAmelCase__ : Tuple = EncodecModel.from_pretrained("""facebook/encodec_32khz""" ) lowerCAmelCase__ : str = MusicgenForCausalLM(a__ ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection lowerCAmelCase__ : Union[str, Any] = decoder.load_state_dict(a__ , strict=a__ ) for key in missing_keys.copy(): if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(a__ ) if len(a__ ) > 0: raise ValueError(f"""Missing key(s) in state_dict: {missing_keys}""" ) if len(a__ ) > 0: raise ValueError(f"""Unexpected key(s) in state_dict: {unexpected_keys}""" ) # init the composite model lowerCAmelCase__ : str = MusicgenForConditionalGeneration(text_encoder=a__ , audio_encoder=a__ , decoder=a__ ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(a__ ) # check we can do a forward pass lowerCAmelCase__ : Any = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) lowerCAmelCase__ : Optional[int] = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): lowerCAmelCase__ : int = model(input_ids=a__ , decoder_input_ids=a__ ).logits if logits.shape != (8, 1, 2048): raise ValueError("""Incorrect shape for logits""" ) # now construct the processor lowerCAmelCase__ : str = AutoTokenizer.from_pretrained("""t5-base""" ) lowerCAmelCase__ : int = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" ) lowerCAmelCase__ : Tuple = MusicgenProcessor(feature_extractor=a__ , tokenizer=a__ ) # set the appropriate bos/pad token ids lowerCAmelCase__ : List[str] = 2048 lowerCAmelCase__ : str = 2048 # set other default generation config params lowerCAmelCase__ : Any = int(30 * audio_encoder.config.frame_rate ) lowerCAmelCase__ : Dict = True lowerCAmelCase__ : List[Any] = 3.0 if pytorch_dump_folder is not None: Path(a__ ).mkdir(exist_ok=a__ ) logger.info(f"""Saving model {checkpoint} to {pytorch_dump_folder}""" ) model.save_pretrained(a__ ) processor.save_pretrained(a__ ) if repo_id: logger.info(f"""Pushing model {checkpoint} to {repo_id}""" ) model.push_to_hub(a__ ) processor.push_to_hub(a__ ) if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint''', default='''small''', type=str, help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''', ) parser.add_argument( '''--pytorch_dump_folder''', required=True, default=None, type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) parser.add_argument( '''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.''' ) _lowerCAmelCase = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
37
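A toy-scale illustration of the fused-projection split performed in the rename/state-dict logic above; the sizes are invented for the example, while the real checkpoints use the hidden sizes listed in decoder_config_from_checkpoint.

import torch

# audiocraft stores q/k/v as a single in_proj_weight of shape
# (3 * hidden_size, hidden_size); conversion slices it into three equal blocks.
hidden_size = 4  # toy size for illustration only
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)

q_proj = in_proj_weight[:hidden_size, :]
k_proj = in_proj_weight[hidden_size : 2 * hidden_size, :]
v_proj = in_proj_weight[-hidden_size:, :]

assert q_proj.shape == k_proj.shape == v_proj.shape == (hidden_size, hidden_size)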
from __future__ import annotations import math def UpperCAmelCase_( a__ , a__ ): """simple docstring""" if len(a__ ) != 2 or len(a[0] ) != 2 or len(a__ ) != 2 or len(b[0] ) != 2: raise Exception('''Matrices are not 2x2''' ) SCREAMING_SNAKE_CASE : Dict = [ [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]], [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]], ] return new_matrix def UpperCAmelCase_( a__ , a__ ): """simple docstring""" return [ [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(a__ ) ) ] def UpperCAmelCase_( a__ , a__ ): """simple docstring""" return [ [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(a__ ) ) ] def UpperCAmelCase_( a__ ): """simple docstring""" if len(a__ ) % 2 != 0 or len(a[0] ) % 2 != 0: raise Exception('''Odd matrices are not supported!''' ) SCREAMING_SNAKE_CASE : str = len(a__ ) SCREAMING_SNAKE_CASE : Any = matrix_length // 2 SCREAMING_SNAKE_CASE : Tuple = [[a[i][j] for j in range(a__ , a__ )] for i in range(a__ )] SCREAMING_SNAKE_CASE : Optional[int] = [ [a[i][j] for j in range(a__ , a__ )] for i in range(a__ , a__ ) ] SCREAMING_SNAKE_CASE : Optional[Any] = [[a[i][j] for j in range(a__ )] for i in range(a__ )] SCREAMING_SNAKE_CASE : List[Any] = [[a[i][j] for j in range(a__ )] for i in range(a__ , a__ )] return top_left, top_right, bot_left, bot_right def UpperCAmelCase_( a__ ): """simple docstring""" return len(a__ ), len(matrix[0] ) def UpperCAmelCase_( a__ ): """simple docstring""" print('''\n'''.join(str(a__ ) for line in matrix ) ) def UpperCAmelCase_( a__ , a__ ): """simple docstring""" if matrix_dimensions(a__ ) == (2, 2): return default_matrix_multiplication(a__ , a__ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = split_matrix(a__ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = split_matrix(a__ ) SCREAMING_SNAKE_CASE : Dict = actual_strassen(a__ , matrix_subtraction(a__ , a__ ) ) SCREAMING_SNAKE_CASE : List[Any] = actual_strassen(matrix_addition(a__ , a__ ) , a__ ) SCREAMING_SNAKE_CASE : Union[str, Any] = actual_strassen(matrix_addition(a__ , a__ ) , a__ ) SCREAMING_SNAKE_CASE : int = actual_strassen(a__ , matrix_subtraction(a__ , a__ ) ) SCREAMING_SNAKE_CASE : Any = actual_strassen(matrix_addition(a__ , a__ ) , matrix_addition(a__ , a__ ) ) SCREAMING_SNAKE_CASE : Tuple = actual_strassen(matrix_subtraction(a__ , a__ ) , matrix_addition(a__ , a__ ) ) SCREAMING_SNAKE_CASE : Tuple = actual_strassen(matrix_subtraction(a__ , a__ ) , matrix_addition(a__ , a__ ) ) SCREAMING_SNAKE_CASE : List[Any] = matrix_addition(matrix_subtraction(matrix_addition(a__ , a__ ) , a__ ) , a__ ) SCREAMING_SNAKE_CASE : Union[str, Any] = matrix_addition(a__ , a__ ) SCREAMING_SNAKE_CASE : Dict = matrix_addition(a__ , a__ ) SCREAMING_SNAKE_CASE : Optional[Any] = matrix_subtraction(matrix_subtraction(matrix_addition(a__ , a__ ) , a__ ) , a__ ) # construct the new matrix from our 4 quadrants SCREAMING_SNAKE_CASE : Optional[Any] = [] for i in range(len(a__ ) ): new_matrix.append(top_left[i] + top_right[i] ) for i in range(len(a__ ) ): new_matrix.append(bot_left[i] + bot_right[i] ) return new_matrix def UpperCAmelCase_( a__ , a__ ): """simple docstring""" if matrix_dimensions(a__ )[1] != matrix_dimensions(a__ )[0]: SCREAMING_SNAKE_CASE : Any = ( '''Unable to multiply these matrices, please check the 
dimensions.\n''' F"""Matrix A: {matrixa}\n""" F"""Matrix B: {matrixa}""" ) raise Exception(a__ ) SCREAMING_SNAKE_CASE : str = matrix_dimensions(a__ ) SCREAMING_SNAKE_CASE : Tuple = matrix_dimensions(a__ ) if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]: return [matrixa, matrixa] SCREAMING_SNAKE_CASE : str = max(*a__ , *a__ ) SCREAMING_SNAKE_CASE : Union[str, Any] = int(math.pow(2 , math.ceil(math.loga(a__ ) ) ) ) SCREAMING_SNAKE_CASE : Optional[int] = matrixa SCREAMING_SNAKE_CASE : Tuple = matrixa # Adding zeros to the matrices so that the arrays dimensions are the same and also # power of 2 for i in range(0 , a__ ): if i < dimensiona[0]: for _ in range(dimensiona[1] , a__ ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) if i < dimensiona[0]: for _ in range(dimensiona[1] , a__ ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) SCREAMING_SNAKE_CASE : Optional[Any] = actual_strassen(a__ , a__ ) # Removing the additional zeros for i in range(0 , a__ ): if i < dimensiona[0]: for _ in range(dimensiona[1] , a__ ): final_matrix[i].pop() else: final_matrix.pop() return final_matrix if __name__ == "__main__": a__ : Dict = [ [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 2, 3, 1], ] a__ : Union[str, Any] = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]] print(strassen(matrixa, matrixa))
313
0
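Standalone helpers (mine, not taken from the module above) showing the size computation the `strassen` wrapper performs before recursing: both operands are zero-padded up to a square whose side is the next power of two.

import math

def next_power_of_two(n: int) -> int:
    return int(math.pow(2, math.ceil(math.log2(n))))

def pad_matrix(matrix, size):
    # Zero-pad every row to `size` columns, then add all-zero rows up to `size`.
    rows = [row + [0] * (size - len(row)) for row in matrix]
    rows += [[0] * size for _ in range(size - len(rows))]
    return rows

print(pad_matrix([[1, 2, 3], [4, 5, 6]], next_power_of_two(3)))
# [[1, 2, 3, 0], [4, 5, 6, 0], [0, 0, 0, 0], [0, 0, 0, 0]]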
import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class A__ ( a__ ): def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ : int = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_lowerCamelCase , 'width_multiplier' ) ) class A__ : def __init__( self : List[str] , a : List[str] , a : List[Any]=13 , a : Union[str, Any]=64 , a : Optional[int]=2 , a : str=3 , a : Optional[Any]="swish" , a : List[str]=3 , a : int=32 , a : Tuple=0.1 , a : Tuple=0.0_2 , a : Optional[Any]=True , a : Any=True , a : Union[str, Any]=10 , a : Optional[Any]=None , a : Dict=0.2_5 , a : Dict=0.0 , a : Tuple=0.0 , ): '''simple docstring''' lowerCAmelCase__ : int = parent lowerCAmelCase__ : List[str] = batch_size lowerCAmelCase__ : Dict = image_size lowerCAmelCase__ : int = patch_size lowerCAmelCase__ : Optional[int] = num_channels lowerCAmelCase__ : int = make_divisible(512 * width_multiplier , divisor=8 ) lowerCAmelCase__ : Union[str, Any] = hidden_act lowerCAmelCase__ : int = conv_kernel_size lowerCAmelCase__ : List[str] = output_stride lowerCAmelCase__ : List[str] = classifier_dropout_prob lowerCAmelCase__ : List[Any] = use_labels lowerCAmelCase__ : Dict = is_training lowerCAmelCase__ : Tuple = num_labels lowerCAmelCase__ : Tuple = initializer_range lowerCAmelCase__ : Any = scope lowerCAmelCase__ : Union[str, Any] = width_multiplier lowerCAmelCase__ : str = ffn_dropout lowerCAmelCase__ : Optional[int] = attn_dropout def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase__ : Dict = None lowerCAmelCase__ : List[Any] = None if self.use_labels: lowerCAmelCase__ : str = ids_tensor([self.batch_size] , self.num_labels ) lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) lowerCAmelCase__ : Tuple = self.get_config() return config, pixel_values, labels, pixel_labels def _lowerCamelCase ( self : int ): '''simple docstring''' return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def _lowerCamelCase ( self : List[Any] , a : int , a : Union[str, Any] , a : Dict , a : Dict ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = MobileViTVaModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() 
lowerCAmelCase__ : List[Any] = model(_lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def _lowerCamelCase ( self : Optional[int] , a : Tuple , a : int , a : int , a : List[str] ): '''simple docstring''' lowerCAmelCase__ : List[str] = self.num_labels lowerCAmelCase__ : str = MobileViTVaForImageClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() lowerCAmelCase__ : Union[str, Any] = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowerCamelCase ( self : Any , a : List[Any] , a : int , a : Union[str, Any] , a : Any ): '''simple docstring''' lowerCAmelCase__ : Tuple = self.num_labels lowerCAmelCase__ : Dict = MobileViTVaForSemanticSegmentation(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() lowerCAmelCase__ : int = model(_lowerCamelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) lowerCAmelCase__ : List[str] = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : int = self.prepare_config_and_inputs() lowerCAmelCase__ : Dict = config_and_inputs lowerCAmelCase__ : Union[str, Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class A__ ( a__ , a__ , unittest.TestCase ): lowercase = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) lowercase = ( { 'feature-extraction': MobileViTVaModel, 'image-classification': MobileViTVaForImageClassification, 'image-segmentation': MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = MobileViTVaModelTester(self ) lowerCAmelCase__ : str = MobileViTVaConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase ) def _lowerCamelCase ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='MobileViTV2 does not use inputs_embeds' ) def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' pass @unittest.skip(reason='MobileViTV2 does not support input and output embeddings' ) def _lowerCamelCase ( self : List[str] ): '''simple docstring''' pass @unittest.skip(reason='MobileViTV2 does not output attentions' ) def _lowerCamelCase ( self : int ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' ) def _lowerCamelCase ( self : Any ): '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def _lowerCamelCase ( self : int ): '''simple docstring''' pass def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : Tuple = model_class(_lowerCamelCase ) lowerCAmelCase__ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase__ : str = [*signature.parameters.keys()] lowerCAmelCase__ : str = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def _lowerCamelCase ( self : Tuple ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' def check_hidden_states_output(a : Optional[Any] , a : List[Any] , a : Optional[Any] ): lowerCAmelCase__ : Optional[int] = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() with torch.no_grad(): lowerCAmelCase__ : str = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) lowerCAmelCase__ : List[str] = outputs.hidden_states lowerCAmelCase__ : List[Any] = 5 self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. lowerCAmelCase__ : str = 2 for i in range(len(_lowerCamelCase ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : int = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase__ : str = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) def _lowerCamelCase ( self : str ): '''simple docstring''' lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCamelCase ) @slow def _lowerCamelCase ( self : str ): '''simple docstring''' for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ : int = MobileViTVaModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def lowerCAmelCase__ ( ) -> Any: lowerCAmelCase__ : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class A__ ( unittest.TestCase ): @cached_property def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' return ( MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ) if is_vision_available() else None ) @slow def _lowerCamelCase ( self : Tuple ): '''simple docstring''' lowerCAmelCase__ : List[Any] = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to( _lowerCamelCase ) 
lowerCAmelCase__ : Optional[int] = self.default_image_processor lowerCAmelCase__ : Optional[Any] = prepare_img() lowerCAmelCase__ : List[Any] = image_processor(images=_lowerCamelCase , return_tensors='pt' ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): lowerCAmelCase__ : Optional[int] = model(**_lowerCamelCase ) # verify the logits lowerCAmelCase__ : int = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) lowerCAmelCase__ : List[str] = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1E-4 ) ) @slow def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ : Tuple = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) lowerCAmelCase__ : Optional[Any] = model.to(_lowerCamelCase ) lowerCAmelCase__ : Union[str, Any] = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) lowerCAmelCase__ : Any = prepare_img() lowerCAmelCase__ : Union[str, Any] = image_processor(images=_lowerCamelCase , return_tensors='pt' ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): lowerCAmelCase__ : List[str] = model(**_lowerCamelCase ) lowerCAmelCase__ : Any = outputs.logits # verify the logits lowerCAmelCase__ : Tuple = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , _lowerCamelCase ) lowerCAmelCase__ : List[Any] = torch.tensor( [ [[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]], [[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]], [[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]], ] , device=_lowerCamelCase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _lowerCamelCase , atol=1E-4 ) ) @slow def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) lowerCAmelCase__ : Optional[int] = model.to(_lowerCamelCase ) lowerCAmelCase__ : Optional[Any] = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) lowerCAmelCase__ : str = prepare_img() lowerCAmelCase__ : int = image_processor(images=_lowerCamelCase , return_tensors='pt' ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): lowerCAmelCase__ : Dict = model(**_lowerCamelCase ) lowerCAmelCase__ : List[Any] = outputs.logits.detach().cpu() lowerCAmelCase__ : str = image_processor.post_process_semantic_segmentation(outputs=_lowerCamelCase , target_sizes=[(50, 60)] ) lowerCAmelCase__ : Tuple = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , _lowerCamelCase ) lowerCAmelCase__ : List[str] = image_processor.post_process_semantic_segmentation(outputs=_lowerCamelCase ) lowerCAmelCase__ : str = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , _lowerCamelCase )
212
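The model tester above derives its hidden size via `make_divisible(512 * width_multiplier, divisor=8)`; below is the usual channel-rounding rule behind that name, reconstructed from the standard MobileNet recipe, so treat it as an approximation rather than the library's exact code.

def make_divisible(value, divisor=8, min_value=None):
    # Round to the nearest multiple of `divisor`, never dropping below 90% of `value`.
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:
        new_value += divisor
    return int(new_value)

print(make_divisible(512 * 0.25))  # 128, the hidden size for width_multiplier=0.25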
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState

warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")


class AcceleratedScheduler:
    """Wraps a learning-rate scheduler so it only advances when the wrapped optimizer(s) actually stepped."""

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
313
0
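A short usage sketch for the AcceleratedScheduler above; in practice Accelerator.prepare() performs the wrapping, so user code just calls scheduler.step(). The model, optimizer, and scheduler choices below are illustrative.

import torch
from accelerate import Accelerator

# prepare() returns the scheduler already wrapped, so a plain .step() is
# safe under gradient accumulation and multi-process training.
accelerator = Accelerator()
model = torch.nn.Linear(2, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)

model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
optimizer.step()
scheduler.step()  # becomes a no-op on iterations where the optimizer step was skipped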
def odd_even_transposition(arr: list) -> list:
    """Sort a list with odd-even transposition (brick) sort: n passes of
    compare-and-swap over alternating odd/even adjacent pairs."""
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
166
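A quick property check (my own addition) for the sort above: since n passes always suffice for n elements, the result should agree with the built-in sort on any random input.

import random

# Assumes odd_even_transposition from the snippet above is in scope.
data = [random.randint(0, 99) for _ in range(20)]
assert odd_even_transposition(list(data)) == sorted(data)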
import argparse import os from pathlib import Path from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params a__ : Optional[Any] = [ # replace left string with right string to get the relevant state_dict key (identical state dict to bart) ['''memory_attention''', '''encoder_attn'''], ['''attention''', '''attn'''], ['''/''', '''.'''], ['''.LayerNorm.gamma''', '''_layer_norm.weight'''], ['''.LayerNorm.beta''', '''_layer_norm.bias'''], ['''r.layer_''', '''r.layers.'''], ['''output_proj''', '''out_proj'''], ['''ffn.dense_1.''', '''fc2.'''], ['''ffn.dense.''', '''fc1.'''], ['''ffn_layer_norm''', '''final_layer_norm'''], ['''kernel''', '''weight'''], ['''encoder_layer_norm.''', '''encoder.layer_norm.'''], ['''decoder_layer_norm.''', '''decoder.layer_norm.'''], ['''embeddings.weights''', '''shared.weight'''], ] def UpperCAmelCase_( a__ ): """simple docstring""" for pegasus_name, hf_name in PATTERNS: SCREAMING_SNAKE_CASE : Union[str, Any] = k.replace(a__ , a__ ) return k def UpperCAmelCase_( a__ , a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = DEFAULTS.copy() cfg_kwargs.update(a__ ) SCREAMING_SNAKE_CASE : Union[str, Any] = PegasusConfig(**a__ ) SCREAMING_SNAKE_CASE : Optional[int] = PegasusForConditionalGeneration(a__ ) SCREAMING_SNAKE_CASE : Dict = torch_model.model.state_dict() SCREAMING_SNAKE_CASE : List[str] = {} for k, v in tf_weights.items(): SCREAMING_SNAKE_CASE : int = rename_state_dict_key(a__ ) if new_k not in sd: raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" ) if "dense" in k or "proj" in new_k: SCREAMING_SNAKE_CASE : Dict = v.T SCREAMING_SNAKE_CASE : Tuple = torch.tensor(a__ , dtype=sd[new_k].dtype ) assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}""" # make sure embedding.padding_idx is respected SCREAMING_SNAKE_CASE : Tuple = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] ) SCREAMING_SNAKE_CASE : int = mapping['''shared.weight'''] SCREAMING_SNAKE_CASE : Union[str, Any] = mapping['''shared.weight'''] SCREAMING_SNAKE_CASE : Optional[Any] = {k: torch.zeros_like(a__ ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping} mapping.update(**a__ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = torch_model.model.load_state_dict(a__ , strict=a__ ) SCREAMING_SNAKE_CASE : Optional[Any] = [ k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight'''] ] assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], F"""no matches found for the following tf keys {extra}""" return torch_model def UpperCAmelCase_( a__="./ckpt/aeslc/model.ckpt-32000" ): """simple docstring""" SCREAMING_SNAKE_CASE : str = tf.train.list_variables(a__ ) SCREAMING_SNAKE_CASE : str = {} SCREAMING_SNAKE_CASE : List[Any] = ['''Adafactor''', '''global_step'''] for name, shape in tqdm(a__ , desc='''converting tf checkpoint to dict''' ): SCREAMING_SNAKE_CASE : Union[str, Any] = any(pat in name for pat in ignore_name ) if skip_key: continue SCREAMING_SNAKE_CASE : Dict = tf.train.load_variable(a__ , a__ ) SCREAMING_SNAKE_CASE : Any = array return tf_weights def UpperCAmelCase_( a__ , a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = Path(a__ ).parent.name 
SCREAMING_SNAKE_CASE : Union[str, Any] = task_specific_params[F"""summarization_{dataset}"""]['''max_position_embeddings'''] SCREAMING_SNAKE_CASE : Dict = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' , model_max_length=a__ ) assert tok.model_max_length == desired_max_model_length tok.save_pretrained(a__ ) # convert model SCREAMING_SNAKE_CASE : Any = get_tf_weights_as_numpy(a__ ) SCREAMING_SNAKE_CASE : List[str] = task_specific_params[F"""summarization_{dataset}"""] if dataset == "large": SCREAMING_SNAKE_CASE : int = task_specific_params SCREAMING_SNAKE_CASE : List[str] = convert_pegasus(a__ , a__ ) torch_model.save_pretrained(a__ ) SCREAMING_SNAKE_CASE : Union[str, Any] = torch_model.state_dict() sd.pop('''model.decoder.embed_positions.weight''' ) sd.pop('''model.encoder.embed_positions.weight''' ) torch.save(a__ , Path(a__ ) / '''pytorch_model.bin''' ) if __name__ == "__main__": a__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''') parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''') a__ : List[str] = parser.parse_args() if args.save_dir is None: a__ : Any = Path(args.tf_ckpt_path).parent.name a__ : int = os.path.join('''pegasus''', dataset) convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
313
0
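A self-contained illustration of the rename_state_dict_key pattern above: ordered substring replacements map TF checkpoint names onto the Hugging Face layout. Only a hand-picked subset of PATTERNS is shown, and the sample key is made up.

# Order matters: "memory_attention" must be rewritten before the bare
# "attention" rule runs, or the prefix would be mangled.
patterns = [
    ("memory_attention", "encoder_attn"),
    ("attention", "attn"),
    ("/", "."),
    ("kernel", "weight"),
]

def rename_key(k: str) -> str:
    for old, new in patterns:
        k = k.replace(old, new)
    return k

print(rename_key("decoder/memory_attention/kernel"))  # decoder.encoder_attn.weight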
"""simple docstring""" import argparse import os import re lowercase__ = '''src/transformers''' # Pattern that looks at the indentation in a line. lowercase__ = re.compile(R"""^(\s*)\S""") # Pattern that matches `"key":" and puts `key` in group 0. lowercase__ = re.compile(R"""^\s*\"([^\"]+)\":""") # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. lowercase__ = re.compile(R"""^\s*_import_structure\[\"([^\"]+)\"\]""") # Pattern that matches `"key",` and puts `key` in group 0. lowercase__ = re.compile(R"""^\s*\"([^\"]+)\",\s*$""") # Pattern that matches any `[stuff]` and puts `stuff` in group 0. lowercase__ = re.compile(R"""\[([^\]]+)\]""") def _snake_case ( lowercase__ ): _lowerCamelCase : Tuple = _re_indent.search(a__ ) return "" if search is None else search.groups()[0] def _snake_case ( lowercase__ , lowercase__="" , lowercase__=None , lowercase__=None ): _lowerCamelCase : Union[str, Any] = 0 _lowerCamelCase : int = code.split('\n' ) if start_prompt is not None: while not lines[index].startswith(a__ ): index += 1 _lowerCamelCase : str = ['''\n'''.join(lines[:index] )] else: _lowerCamelCase : List[Any] = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). _lowerCamelCase : Optional[Any] = [lines[index]] index += 1 while index < len(a__ ) and (end_prompt is None or not lines[index].startswith(a__ )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(a__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ): current_block.append(lines[index] ) blocks.append('\n'.join(a__ ) ) if index < len(a__ ) - 1: _lowerCamelCase : List[str] = [lines[index + 1]] index += 1 else: _lowerCamelCase : Union[str, Any] = [] else: blocks.append('\n'.join(a__ ) ) _lowerCamelCase : Union[str, Any] = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(a__ ) > 0: blocks.append('\n'.join(a__ ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(a__ ): blocks.append('\n'.join(lines[index:] ) ) return blocks def _snake_case ( lowercase__ ): def _inner(lowercase__ ): return key(a__ ).lower().replace('_' , '' ) return _inner def _snake_case ( lowercase__ , lowercase__=None ): def noop(lowercase__ ): return x if key is None: _lowerCamelCase : List[Any] = noop # Constants are all uppercase, they go first. _lowerCamelCase : Union[str, Any] = [obj for obj in objects if key(a__ ).isupper()] # Classes are not all uppercase but start with a capital, they go second. _lowerCamelCase : int = [obj for obj in objects if key(a__ )[0].isupper() and not key(a__ ).isupper()] # Functions begin with a lowercase, they go last. _lowerCamelCase : Dict = [obj for obj in objects if not key(a__ )[0].isupper()] _lowerCamelCase : Optional[int] = ignore_underscore(a__ ) return sorted(a__ , key=a__ ) + sorted(a__ , key=a__ ) + sorted(a__ , key=a__ ) def _snake_case ( lowercase__ ): def _replace(lowercase__ ): _lowerCamelCase : List[Any] = match.groups()[0] if "," not in imports: return f'''[{imports}]''' _lowerCamelCase : Optional[int] = [part.strip().replace('"' , '' ) for part in imports.split(',' )] # We will have a final empty element if the line finished with a comma. 
if len(keys[-1] ) == 0: _lowerCamelCase : List[Any] = keys[:-1] return "[" + ", ".join([f'''\"{k}\"''' for k in sort_objects(a__ )] ) + "]" _lowerCamelCase : Dict = import_statement.split('\n' ) if len(a__ ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. _lowerCamelCase : Dict = 2 if lines[1].strip() == '''[''' else 1 _lowerCamelCase : Dict = [(i, _re_strip_line.search(a__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] _lowerCamelCase : Union[str, Any] = sort_objects(a__ , key=lambda lowercase__ : x[1] ) _lowerCamelCase : int = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(a__ ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: _lowerCamelCase : str = _re_bracket_content.sub(_replace , lines[1] ) else: _lowerCamelCase : Any = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: _lowerCamelCase : Dict = keys[:-1] _lowerCamelCase : List[str] = get_indent(lines[1] ) + ''', '''.join([f'''\"{k}\"''' for k in sort_objects(a__ )] ) return "\n".join(a__ ) else: # Finally we have to deal with imports fitting on one line _lowerCamelCase : str = _re_bracket_content.sub(_replace , a__ ) return import_statement def _snake_case ( lowercase__ , lowercase__=True ): with open(a__ , encoding='utf-8' ) as f: _lowerCamelCase : List[Any] = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 _lowerCamelCase : Tuple = split_code_in_indented_blocks( a__ , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' ) # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(a__ ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. _lowerCamelCase : str = main_blocks[block_idx] _lowerCamelCase : str = block.split('\n' ) # Get to the start of the imports. _lowerCamelCase : str = 0 while line_idx < len(a__ ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: _lowerCamelCase : Tuple = len(a__ ) else: line_idx += 1 if line_idx >= len(a__ ): continue # Ignore beginning and last line: they don't contain anything. _lowerCamelCase : Optional[Any] = '''\n'''.join(block_lines[line_idx:-1] ) _lowerCamelCase : Optional[Any] = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. _lowerCamelCase : Optional[Any] = split_code_in_indented_blocks(a__ , indent_level=a__ ) # We have two categories of import key: list or _import_structure[key].append/extend _lowerCamelCase : Optional[Any] = _re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. _lowerCamelCase : Dict = [(pattern.search(a__ ).groups()[0] if pattern.search(a__ ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. 
_lowerCamelCase : Any = [(i, key) for i, key in enumerate(a__ ) if key is not None] _lowerCamelCase : Union[str, Any] = [x[0] for x in sorted(a__ , key=lambda lowercase__ : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. _lowerCamelCase : Optional[Any] = 0 _lowerCamelCase : str = [] for i in range(len(a__ ) ): if keys[i] is None: reorderded_blocks.append(internal_blocks[i] ) else: _lowerCamelCase : int = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reorderded_blocks.append(a__ ) count += 1 # And we put our main block back together with its first and last line. _lowerCamelCase : Union[str, Any] = '''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] ) if code != "\n".join(a__ ): if check_only: return True else: print(f'''Overwriting {file}.''' ) with open(a__ , 'w' , encoding='utf-8' ) as f: f.write('\n'.join(a__ ) ) def _snake_case ( lowercase__=True ): _lowerCamelCase : Tuple = [] for root, _, files in os.walk(a__ ): if "__init__.py" in files: _lowerCamelCase : str = sort_imports(os.path.join(a__ , '__init__.py' ) , check_only=a__ ) if result: _lowerCamelCase : str = [os.path.join(a__ , '__init__.py' )] if len(a__ ) > 0: raise ValueError(f'''Would overwrite {len(a__ )} files, run `make style`.''' ) if __name__ == "__main__": lowercase__ = argparse.ArgumentParser() parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""") lowercase__ = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
96
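The three-bucket ordering rule implemented by sort_objects above, restated as a standalone snippet: uppercase constants first, CapWords classes second, lowercase functions last, each bucket sorted case-insensitively with underscores ignored. The sample names are invented.

def sort_objects(objects):
    # Sort key mirrors ignore_underscore above: case-insensitive, underscores dropped.
    key = lambda name: name.lower().replace("_", "")
    constants = [o for o in objects if o.isupper()]
    classes = [o for o in objects if o[0].isupper() and not o.isupper()]
    functions = [o for o in objects if not o[0].isupper()]
    return sorted(constants, key=key) + sorted(classes, key=key) + sorted(functions, key=key)

print(sort_objects(["load_tool", "AutoModel", "CONFIG_NAME", "Agent", "cached_file"]))
# ['CONFIG_NAME', 'Agent', 'AutoModel', 'cached_file', 'load_tool']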
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class a_ ( a__ , unittest.TestCase ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = KandinskyImgaImgPipeline __SCREAMING_SNAKE_CASE : str = ['prompt', 'image_embeds', 'negative_image_embeds', 'image'] __SCREAMING_SNAKE_CASE : int = [ 'prompt', 'negative_prompt', 'image_embeds', 'negative_image_embeds', 'image', ] __SCREAMING_SNAKE_CASE : int = [ 'generator', 'height', 'width', 'strength', 'guidance_scale', 'negative_prompt', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] __SCREAMING_SNAKE_CASE : List[Any] = False @property def __lowerCAmelCase ( self ) ->int: return 32 @property def __lowerCAmelCase ( self ) ->List[str]: return 32 @property def __lowerCAmelCase ( self ) ->Optional[int]: return self.time_input_dim @property def __lowerCAmelCase ( self ) ->Tuple: return self.time_input_dim * 4 @property def __lowerCAmelCase ( self ) ->Optional[int]: return 100 @property def __lowerCAmelCase ( self ) ->Dict: SCREAMING_SNAKE_CASE : str = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' ) return tokenizer @property def __lowerCAmelCase ( self ) ->Tuple: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : List[Any] = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) SCREAMING_SNAKE_CASE : Dict = MultilingualCLIP(_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = text_encoder.eval() return text_encoder @property def __lowerCAmelCase ( self ) ->Union[str, Any]: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = { '''in_channels''': 4, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''text_image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''text_image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } SCREAMING_SNAKE_CASE : Tuple = UNetaDConditionModel(**_lowerCamelCase ) return model @property def __lowerCAmelCase ( self ) ->List[str]: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ 
"AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __lowerCAmelCase ( self ) ->Optional[Any]: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : List[str] = VQModel(**self.dummy_movq_kwargs ) return model def __lowerCAmelCase ( self ) ->Dict: SCREAMING_SNAKE_CASE : Any = self.dummy_text_encoder SCREAMING_SNAKE_CASE : Any = self.dummy_tokenizer SCREAMING_SNAKE_CASE : List[Any] = self.dummy_unet SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_movq SCREAMING_SNAKE_CASE : Optional[Any] = { '''num_train_timesteps''': 1000, '''beta_schedule''': '''linear''', '''beta_start''': 0.0_0_0_8_5, '''beta_end''': 0.0_1_2, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', '''thresholding''': False, } SCREAMING_SNAKE_CASE : Optional[Any] = DDIMScheduler(**_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = { '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->str: SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) SCREAMING_SNAKE_CASE : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowerCamelCase ) # create init_image SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE : str = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) ) if str(_lowerCamelCase ).startswith('''mps''' ): SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(_lowerCamelCase ) else: SCREAMING_SNAKE_CASE : str = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Any = { '''prompt''': '''horse''', '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def __lowerCAmelCase ( self ) ->Dict: SCREAMING_SNAKE_CASE : str = '''cpu''' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : List[str] = self.pipeline_class(**_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : Dict = output.images SCREAMING_SNAKE_CASE : Any = pipe( **self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0] SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) SCREAMING_SNAKE_CASE : Optional[int] = np.array( [0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got 
{image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class a_ ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self ) ->List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) ->Optional[int]: SCREAMING_SNAKE_CASE : int = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/kandinsky_img2img_frog.npy''' ) SCREAMING_SNAKE_CASE : int = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) SCREAMING_SNAKE_CASE : str = '''A red cartoon frog, 4k''' SCREAMING_SNAKE_CASE : Any = KandinskyPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[str] = KandinskyImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : str = pipeline.to(_lowerCamelCase ) pipeline.set_progress_bar_config(disable=_lowerCamelCase ) SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_prior( _lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() SCREAMING_SNAKE_CASE : Dict = pipeline( _lowerCamelCase , image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , ) SCREAMING_SNAKE_CASE : Tuple = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
313
0
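A condensed sketch of the two-stage inference pattern the slow tests above exercise, written with the public diffusers class names (the record spells the pipeline KandinskyImgaImgPipeline, but it ships as KandinskyImg2ImgPipeline); running it needs a CUDA device and network access.

import torch
from diffusers import KandinskyImg2ImgPipeline, KandinskyPriorPipeline
from diffusers.utils import load_image

prior = KandinskyPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
).to("cuda")
pipe = KandinskyImg2ImgPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
).to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
)
# Stage 1: the prior maps text to CLIP image embeddings ...
image_embeds, negative_image_embeds = prior("A red cartoon frog, 4k", negative_prompt="").to_tuple()
# Stage 2: ... which the img2img decoder consumes alongside the init image.
image = pipe(
    "A red cartoon frog, 4k",
    image=init_image,
    image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds,
    strength=0.2,
).images[0]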
'''simple docstring''' import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed UpperCAmelCase_ = logging.getLogger(__name__) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : List[Any]=3 , SCREAMING_SNAKE_CASE__ : Any=16 , SCREAMING_SNAKE_CASE__ : Optional[Any] = 10 , SCREAMING_SNAKE_CASE__ : int = 2 ): '''simple docstring''' def get_dataset(SCREAMING_SNAKE_CASE__ : Any ): UpperCAmelCase__ = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(a__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) UpperCAmelCase__ = get_dataset(a__ ) UpperCAmelCase__ = get_dataset(a__ ) UpperCAmelCase__ = DataLoader(a__ , shuffle=a__ , batch_size=a__ , num_workers=4 ) UpperCAmelCase__ = DataLoader(a__ , shuffle=a__ , batch_size=a__ , num_workers=4 ) return (train_dataloader, valid_dataloader) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any]=None ): '''simple docstring''' UpperCAmelCase__ = [] for epoch in range(a__ ): # Train quickly model.train() for batch in dataloader: UpperCAmelCase__ = batch UpperCAmelCase__ = model(a__ ) UpperCAmelCase__ = torch.nn.functional.mse_loss(a__ , a__ ) accelerator.backward(a__ ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class lowerCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : str ): """simple docstring""" super().__init__() UpperCAmelCase__ = nn.Parameter(torch.randn(1 ) ) UpperCAmelCase__ = nn.Parameter(torch.randn(1 ) ) def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : Optional[int] ): """simple docstring""" return x * self.a + self.b class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) UpperCAmelCase__ = DummyModel() UpperCAmelCase__ = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase__ = dummy_dataloaders() UpperCAmelCase__ = ProjectConfiguration(total_limit=1 , project_dir=_lowerCamelCase , automatic_checkpoint_naming=_lowerCamelCase ) # Train baseline UpperCAmelCase__ = Accelerator(project_config=_lowerCamelCase ) UpperCAmelCase__ = accelerator.prepare( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) UpperCAmelCase__ = DummyModel() UpperCAmelCase__ = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase__ = dummy_dataloaders() # Train baseline UpperCAmelCase__ = Accelerator() UpperCAmelCase__ = accelerator.prepare( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # Save initial UpperCAmelCase__ = os.path.join(_lowerCamelCase , """initial""" ) accelerator.save_state(_lowerCamelCase ) 
(UpperCAmelCase__) = model.a.item(), model.b.item() UpperCAmelCase__ = optimizer.state_dict() UpperCAmelCase__ = train(3 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) (UpperCAmelCase__) = model.a.item(), model.b.item() UpperCAmelCase__ = optimizer.state_dict() # Train partially set_seed(42 ) UpperCAmelCase__ = DummyModel() UpperCAmelCase__ = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase__ = dummy_dataloaders() UpperCAmelCase__ = Accelerator() UpperCAmelCase__ = accelerator.prepare( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) accelerator.load_state(_lowerCamelCase ) (UpperCAmelCase__) = model.a.item(), model.b.item() UpperCAmelCase__ = optimizer.state_dict() self.assertEqual(_lowerCamelCase , _lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) UpperCAmelCase__ = train(2 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # Save everything UpperCAmelCase__ = os.path.join(_lowerCamelCase , """checkpoint""" ) accelerator.save_state(_lowerCamelCase ) # Load everything back in and make sure all states work accelerator.load_state(_lowerCamelCase ) test_rands += train(1 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) (UpperCAmelCase__) = model.a.item(), model.b.item() UpperCAmelCase__ = optimizer.state_dict() self.assertEqual(_lowerCamelCase , _lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) UpperCAmelCase__ = DummyModel() UpperCAmelCase__ = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase__ = dummy_dataloaders() UpperCAmelCase__ = ProjectConfiguration(automatic_checkpoint_naming=_lowerCamelCase ) # Train baseline UpperCAmelCase__ = Accelerator(project_dir=_lowerCamelCase , project_config=_lowerCamelCase ) UpperCAmelCase__ = accelerator.prepare( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # Save initial accelerator.save_state() (UpperCAmelCase__) = model.a.item(), model.b.item() UpperCAmelCase__ = optimizer.state_dict() UpperCAmelCase__ = train(3 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) (UpperCAmelCase__) = model.a.item(), model.b.item() UpperCAmelCase__ = optimizer.state_dict() # Train partially set_seed(42 ) UpperCAmelCase__ = DummyModel() UpperCAmelCase__ = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase__ = dummy_dataloaders() UpperCAmelCase__ = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_lowerCamelCase ) UpperCAmelCase__ = Accelerator(project_dir=_lowerCamelCase , project_config=_lowerCamelCase ) UpperCAmelCase__ = accelerator.prepare( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) accelerator.load_state(os.path.join(_lowerCamelCase , """checkpoints""" , """checkpoint_0""" ) ) (UpperCAmelCase__) = model.a.item(), model.b.item() UpperCAmelCase__ = optimizer.state_dict() self.assertEqual(_lowerCamelCase , _lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) UpperCAmelCase__ = train(2 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # Save everything accelerator.save_state() # Load everything back in and 
make sure all states work accelerator.load_state(os.path.join(_lowerCamelCase , """checkpoints""" , """checkpoint_1""" ) ) test_rands += train(1 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) (UpperCAmelCase__) = model.a.item(), model.b.item() UpperCAmelCase__ = optimizer.state_dict() self.assertEqual(_lowerCamelCase , _lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" UpperCAmelCase__ = torch.tensor([1, 2, 3] ) UpperCAmelCase__ = torch.tensor([2, 3, 4] ) UpperCAmelCase__ = DummyModel() UpperCAmelCase__ = torch.optim.Adam(net.parameters() ) UpperCAmelCase__ = Accelerator() with self.assertRaises(_lowerCamelCase ) as ve: accelerator.register_for_checkpointing(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) UpperCAmelCase__ = str(ve.exception ) self.assertTrue("""Item at index 0""" in message ) self.assertTrue("""Item at index 1""" in message ) self.assertFalse("""Item at index 2""" in message ) self.assertFalse("""Item at index 3""" in message ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) UpperCAmelCase__ = DummyModel() UpperCAmelCase__ = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) UpperCAmelCase__ = torch.optim.lr_scheduler.StepLR(_lowerCamelCase , step_size=1 , gamma=0.99 ) UpperCAmelCase__ = dummy_dataloaders() UpperCAmelCase__ = ProjectConfiguration(automatic_checkpoint_naming=_lowerCamelCase ) # Train baseline UpperCAmelCase__ = Accelerator(project_dir=_lowerCamelCase , project_config=_lowerCamelCase ) UpperCAmelCase__ = accelerator.prepare( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # Save initial accelerator.save_state() UpperCAmelCase__ = scheduler.state_dict() train(3 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) self.assertNotEqual(_lowerCamelCase , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(_lowerCamelCase , """checkpoints""" , """checkpoint_0""" ) ) self.assertEqual(_lowerCamelCase , scheduler.state_dict() ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) UpperCAmelCase__ = DummyModel() UpperCAmelCase__ = ProjectConfiguration(automatic_checkpoint_naming=_lowerCamelCase , total_limit=2 ) # Train baseline UpperCAmelCase__ = Accelerator(project_dir=_lowerCamelCase , project_config=_lowerCamelCase ) UpperCAmelCase__ = accelerator.prepare(_lowerCamelCase ) # Save 3 states: for _ in range(11 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(_lowerCamelCase , """checkpoints""" , """checkpoint_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_lowerCamelCase , """checkpoints""" , """checkpoint_9""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_lowerCamelCase , """checkpoints""" , """checkpoint_10""" ) ) ) @require_cuda def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" UpperCAmelCase__ = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] execute_subprocess_async(_lowerCamelCase , env=os.environ.copy() ) if __name__ == "__main__": UpperCAmelCase_ = 
'''/tmp/accelerate/state_checkpointing''' UpperCAmelCase_ = DummyModel() UpperCAmelCase_ = torch.optim.Adam(params=model.parameters(), lr=1E-3) UpperCAmelCase_ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) UpperCAmelCase_ = dummy_dataloaders() UpperCAmelCase_ = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline UpperCAmelCase_ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no') if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) UpperCAmelCase_ = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) UpperCAmelCase_ = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the initial optimizer is loaded on the GPU for group in optimizer.param_groups: UpperCAmelCase_ = group['''params'''][0].device break assert param_device.type == accelerator.device.type UpperCAmelCase_ = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu') for group in optimizer.param_groups: UpperCAmelCase_ = group['''params'''][0].device break assert ( param_device.type == torch.device('cpu').type ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device') for group in optimizer.param_groups: UpperCAmelCase_ = group['''params'''][0].device break assert ( param_device.type == accelerator.device.type ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match='Unsupported optimizer map location passed'): accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid') accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
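The __main__ block above doubles as an integration check for Accelerator.save_state / load_state. A minimal standalone sketch of the checkpoint round trip, assuming only the public accelerate API (the model and paths are illustrative):

import os
import tempfile

import torch
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

with tempfile.TemporaryDirectory() as tmpdir:
    model = torch.nn.Linear(1, 1)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    project_config = ProjectConfiguration(project_dir=tmpdir, automatic_checkpoint_naming=True)
    accelerator = Accelerator(project_config=project_config)
    model, optimizer = accelerator.prepare(model, optimizer)

    accelerator.save_state()  # writes <tmpdir>/checkpoints/checkpoint_0
    # ... training would mutate model/optimizer state here ...
    accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))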
346
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def UpperCAmelCase_( a__ , a__=False ): """simple docstring""" SCREAMING_SNAKE_CASE : int = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ('''module.cls_token''', '''vit.embeddings.cls_token'''), ('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''), ('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''), ('''module.pos_embed''', '''vit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''module.norm.weight''', '''layernorm.weight'''), ('''module.norm.bias''', '''layernorm.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" SCREAMING_SNAKE_CASE : Any = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def UpperCAmelCase_( a__ , a__ , a__=False ): """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: SCREAMING_SNAKE_CASE : Any = '''''' else: SCREAMING_SNAKE_CASE : Optional[int] = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) SCREAMING_SNAKE_CASE : Optional[Any] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" ) SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE : List[str] = in_proj_weight[ : config.hidden_size, : ] SCREAMING_SNAKE_CASE : Any = in_proj_bias[: config.hidden_size] SCREAMING_SNAKE_CASE : int = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] SCREAMING_SNAKE_CASE : List[Any] = in_proj_bias[ 
config.hidden_size : config.hidden_size * 2 ] SCREAMING_SNAKE_CASE : List[str] = in_proj_weight[ -config.hidden_size :, : ] SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias[-config.hidden_size :] def UpperCAmelCase_( a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(a__ , a__ ) def UpperCAmelCase_( a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = [ '''module.fc.fc1.weight''', '''module.fc.fc1.bias''', '''module.fc.bn1.weight''', '''module.fc.bn1.bias''', '''module.fc.bn1.running_mean''', '''module.fc.bn1.running_var''', '''module.fc.bn1.num_batches_tracked''', '''module.fc.fc2.weight''', '''module.fc.fc2.bias''', '''module.fc.bn2.weight''', '''module.fc.bn2.bias''', '''module.fc.bn2.running_mean''', '''module.fc.bn2.running_var''', '''module.fc.bn2.num_batches_tracked''', '''module.fc.fc3.weight''', '''module.fc.fc3.bias''', ] for k in ignore_keys: state_dict.pop(a__ , a__ ) def UpperCAmelCase_( a__ , a__ , a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : int = dct.pop(a__ ) SCREAMING_SNAKE_CASE : Optional[Any] = val def UpperCAmelCase_( a__ , a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = ViTMSNConfig() SCREAMING_SNAKE_CASE : Optional[int] = 1_000 SCREAMING_SNAKE_CASE : str = '''datasets/huggingface/label-files''' SCREAMING_SNAKE_CASE : List[str] = '''imagenet-1k-id2label.json''' SCREAMING_SNAKE_CASE : Union[str, Any] = json.load(open(hf_hub_download(a__ , a__ ) , '''r''' ) ) SCREAMING_SNAKE_CASE : List[Any] = {int(a__ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : str = idalabel SCREAMING_SNAKE_CASE : int = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: SCREAMING_SNAKE_CASE : Tuple = 384 SCREAMING_SNAKE_CASE : Any = 1_536 SCREAMING_SNAKE_CASE : List[str] = 6 elif "l16" in checkpoint_url: SCREAMING_SNAKE_CASE : Optional[int] = 1_024 SCREAMING_SNAKE_CASE : Optional[int] = 4_096 SCREAMING_SNAKE_CASE : Tuple = 24 SCREAMING_SNAKE_CASE : Union[str, Any] = 16 SCREAMING_SNAKE_CASE : Dict = 0.1 elif "b4" in checkpoint_url: SCREAMING_SNAKE_CASE : str = 4 elif "l7" in checkpoint_url: SCREAMING_SNAKE_CASE : Union[str, Any] = 7 SCREAMING_SNAKE_CASE : Union[str, Any] = 1_024 SCREAMING_SNAKE_CASE : List[Any] = 4_096 SCREAMING_SNAKE_CASE : List[Any] = 24 SCREAMING_SNAKE_CASE : Tuple = 16 SCREAMING_SNAKE_CASE : Union[str, Any] = 0.1 SCREAMING_SNAKE_CASE : Union[str, Any] = ViTMSNModel(a__ ) SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(a__ , map_location='''cpu''' )['''target_encoder'''] SCREAMING_SNAKE_CASE : Any = ViTImageProcessor(size=config.image_size ) remove_projection_head(a__ ) SCREAMING_SNAKE_CASE : Any = create_rename_keys(a__ , base_model=a__ ) for src, dest in rename_keys: rename_key(a__ , a__ , a__ ) read_in_q_k_v(a__ , a__ , base_model=a__ ) model.load_state_dict(a__ ) model.eval() SCREAMING_SNAKE_CASE : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg''' SCREAMING_SNAKE_CASE : Dict = Image.open(requests.get(a__ , stream=a__ ).raw ) SCREAMING_SNAKE_CASE : Optional[int] = ViTImageProcessor( size=config.image_size , image_mean=a__ , image_std=a__ ) SCREAMING_SNAKE_CASE : int = image_processor(images=a__ , return_tensors='''pt''' ) # forward pass torch.manual_seed(2 ) SCREAMING_SNAKE_CASE : Tuple = model(**a__ ) SCREAMING_SNAKE_CASE : str = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # 
https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] ) elif "b16" in checkpoint_url: SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] ) elif "l16" in checkpoint_url: SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] ) elif "b4" in checkpoint_url: SCREAMING_SNAKE_CASE : str = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] ) else: SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3] , a__ , atol=1e-4 ) print(F"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(a__ ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(a__ ) if __name__ == "__main__": a__ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''', type=str, help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) a__ : Any = parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
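The read_in_q_k_v helper above splits timm's fused attention projection into separate query/key/value tensors by slicing thirds of the leading dimension. A toy illustration of that slicing (hidden_size = 4 is an arbitrary stand-in):

import torch

hidden_size = 4
qkv_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(
    3 * hidden_size, hidden_size
)

q = qkv_weight[:hidden_size, :]                   # first third  -> query
k = qkv_weight[hidden_size : 2 * hidden_size, :]  # middle third -> key
v = qkv_weight[-hidden_size:, :]                  # last third   -> value

assert torch.equal(torch.cat([q, k, v], dim=0), qkv_weight)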
313
0
import contextlib import importlib import io import unittest import transformers # Try to import everything from transformers to ensure every object can be loaded. from transformers import * # noqa F406 from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available if is_torch_available(): from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification if is_tf_available(): from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification if is_flax_available(): from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification lowerCAmelCase = DUMMY_UNKNOWN_IDENTIFIER # An actual model hosted on huggingface.co lowerCAmelCase = '''main''' # Default branch name lowerCAmelCase = '''f2c752cfc5c0ab6f4bdec59acea69eefbee381c2''' # One particular commit (not the top of `main`) lowerCAmelCase = '''aaaaaaa''' # This commit does not exist, so we should 404. lowerCAmelCase = '''d9e9f15bc825e4b2c9249e9578f884bbcb5e3684''' # Sha-1 of config.json on the top of `main`, for checking purposes lowerCAmelCase = '''4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3''' @contextlib.contextmanager def _a ( ): """simple docstring""" print('''Welcome!''' ) yield print('''Bye!''' ) @contextlib.contextmanager def _a ( ): """simple docstring""" print('''Bonjour!''' ) yield print('''Au revoir!''' ) class _a ( unittest.TestCase ): def lowerCamelCase_ ( self: Any ) -> Any: """simple docstring""" assert transformers.__spec__ is not None assert importlib.util.find_spec('''transformers''' ) is not None class _a ( unittest.TestCase ): @unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO ) def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: int ) -> Any: """simple docstring""" with ContextManagers([] ): print('''Transformers are awesome!''' ) # The print statement adds a new line at the end of the output self.assertEqual(mock_stdout.getvalue() , '''Transformers are awesome!\n''' ) @unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO ) def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Optional[Any] ) -> Union[str, Any]: """simple docstring""" with ContextManagers([context_en()] ): print('''Transformers are awesome!''' ) # The output should be wrapped with an English welcome and goodbye self.assertEqual(mock_stdout.getvalue() , '''Welcome!\nTransformers are awesome!\nBye!\n''' ) @unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO ) def lowerCamelCase_ ( self: str , UpperCamelCase_: Tuple ) -> Tuple: """simple docstring""" with ContextManagers([context_fr(), context_en()] ): print('''Transformers are awesome!''' ) # The output should be wrapped with an English and French welcome and goodbye self.assertEqual(mock_stdout.getvalue() , '''Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n''' ) @require_torch def lowerCamelCase_ ( self: int ) -> Tuple: """simple docstring""" self.assertEqual(find_labels(_lowerCamelCase ) , ['''labels'''] ) self.assertEqual(find_labels(_lowerCamelCase ) , ['''labels''', '''next_sentence_label'''] ) self.assertEqual(find_labels(_lowerCamelCase ) , ['''start_positions''', '''end_positions'''] ) class _a ( a__ ): pass self.assertEqual(find_labels(_lowerCamelCase ) , ['''labels'''] ) @require_tf def lowerCamelCase_ ( self: str ) -> Any: """simple 
docstring""" self.assertEqual(find_labels(_lowerCamelCase ) , ['''labels'''] ) self.assertEqual(find_labels(_lowerCamelCase ) , ['''labels''', '''next_sentence_label'''] ) self.assertEqual(find_labels(_lowerCamelCase ) , ['''start_positions''', '''end_positions'''] ) class _a ( a__ ): pass self.assertEqual(find_labels(_lowerCamelCase ) , ['''labels'''] ) @require_flax def lowerCamelCase_ ( self: Optional[int] ) -> str: """simple docstring""" self.assertEqual(find_labels(_lowerCamelCase ) , [] ) self.assertEqual(find_labels(_lowerCamelCase ) , [] ) self.assertEqual(find_labels(_lowerCamelCase ) , [] ) class _a ( a__ ): pass self.assertEqual(find_labels(_lowerCamelCase ) , [] )
110
import csv import tweepy # Twitter API credentials a__ : Union[str, Any] = '''''' a__ : List[str] = '''''' a__ : Any = '''''' a__ : List[str] = '''''' def UpperCAmelCase_( a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = tweepy.OAuthHandler(a__ , a__ ) auth.set_access_token(a__ , a__ ) SCREAMING_SNAKE_CASE : List[str] = tweepy.API(a__ ) # initialize a list to hold all the tweepy Tweets SCREAMING_SNAKE_CASE : Any = [] # make initial request for most recent tweets (200 is the maximum allowed count) SCREAMING_SNAKE_CASE : List[Any] = api.user_timeline(screen_name=a__ , count=200 ) # save most recent tweets alltweets.extend(a__ ) # save the id of the oldest tweet less one SCREAMING_SNAKE_CASE : Tuple = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(a__ ) > 0: print(F"""getting tweets before {oldest}""" ) # all subsequent requests use the max_id param to prevent duplicates SCREAMING_SNAKE_CASE : Any = api.user_timeline( screen_name=a__ , count=200 , max_id=a__ ) # save most recent tweets alltweets.extend(a__ ) # update the id of the oldest tweet less one SCREAMING_SNAKE_CASE : Dict = alltweets[-1].id - 1 print(F"""...{len(a__ )} tweets downloaded so far""" ) # transform the tweepy tweets into a 2D array that will populate the csv SCREAMING_SNAKE_CASE : Optional[Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(F"""new_{screen_name}_tweets.csv""" , '''w''' ) as f: SCREAMING_SNAKE_CASE : List[Any] = csv.writer(a__ ) writer.writerow(['''id''', '''created_at''', '''text'''] ) writer.writerows(a__ ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets('''FirePing32''')
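The download loop pages backwards by hand, passing max_id = oldest - 1 on every request. Tweepy's Cursor helper does the same bookkeeping; a sketch assuming the authenticated api object built above (note the v1.1 user_timeline endpoint only reaches back about 3,200 tweets either way):

import tweepy

# equivalent pagination via tweepy.Cursor; api and screen_name as above
alltweets = list(
    tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=200).items()
)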
313
0
import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __UpperCAmelCase = logging.get_logger(__name__) class UpperCamelCase__ ( a__ ): """simple docstring""" UpperCAmelCase_ =['input_features', 'is_longer'] def __init__( self , _A=64 , _A=48000 , _A=480 , _A=10 , _A=1024 , _A=0.0 , _A=False , _A = 0 , _A = 14000 , _A = None , _A = "fusion" , _A = "repeatpad" , **_A , ) -> int: super().__init__( feature_size=_lowerCamelCase , sampling_rate=_lowerCamelCase , padding_value=_lowerCamelCase , return_attention_mask=_lowerCamelCase , **_lowerCamelCase , ) SCREAMING_SNAKE_CASE_ = top_db SCREAMING_SNAKE_CASE_ = truncation SCREAMING_SNAKE_CASE_ = padding SCREAMING_SNAKE_CASE_ = fft_window_size SCREAMING_SNAKE_CASE_ = (fft_window_size >> 1) + 1 SCREAMING_SNAKE_CASE_ = hop_length SCREAMING_SNAKE_CASE_ = max_length_s SCREAMING_SNAKE_CASE_ = max_length_s * sampling_rate SCREAMING_SNAKE_CASE_ = sampling_rate SCREAMING_SNAKE_CASE_ = frequency_min SCREAMING_SNAKE_CASE_ = frequency_max SCREAMING_SNAKE_CASE_ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_lowerCamelCase , min_frequency=_lowerCamelCase , max_frequency=_lowerCamelCase , sampling_rate=_lowerCamelCase , norm=_lowerCamelCase , mel_scale='''htk''' , ) SCREAMING_SNAKE_CASE_ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_lowerCamelCase , min_frequency=_lowerCamelCase , max_frequency=_lowerCamelCase , sampling_rate=_lowerCamelCase , norm='''slaney''' , mel_scale='''slaney''' , ) def _UpperCamelCase ( self ) -> Dict[str, Any]: SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE_ = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def _UpperCamelCase ( self , _A , _A = None ) -> np.ndarray: SCREAMING_SNAKE_CASE_ = spectrogram( _lowerCamelCase , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_lowerCamelCase , log_mel='''dB''' , ) return log_mel_spectrogram.T def _UpperCamelCase ( self , _A , _A , _A ) -> List[Any]: SCREAMING_SNAKE_CASE_ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk SCREAMING_SNAKE_CASE_ = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk SCREAMING_SNAKE_CASE_ = [0] # randomly choose index for each part SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[0] ) SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[1] ) SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[2] ) SCREAMING_SNAKE_CASE_ = mel[idx_front : idx_front + chunk_frames, :] SCREAMING_SNAKE_CASE_ = mel[idx_middle : idx_middle + chunk_frames, :] SCREAMING_SNAKE_CASE_ = mel[idx_back : idx_back + chunk_frames, :] SCREAMING_SNAKE_CASE_ = torch.tensor(mel[None, None, :] ) SCREAMING_SNAKE_CASE_ = torch.nn.functional.interpolate( _lowerCamelCase , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=_lowerCamelCase ) SCREAMING_SNAKE_CASE_ = mel_shrink[0][0].numpy() SCREAMING_SNAKE_CASE_ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) 
return mel_fusion def _UpperCamelCase ( self , _A , _A , _A , _A ) -> np.array: if waveform.shape[0] > max_length: if truncation == "rand_trunc": SCREAMING_SNAKE_CASE_ = True # random crop to max_length (for compatibility) -> this should be handled by self.pad SCREAMING_SNAKE_CASE_ = len(_lowerCamelCase ) - max_length SCREAMING_SNAKE_CASE_ = np.random.randint(0 , overflow + 1 ) SCREAMING_SNAKE_CASE_ = waveform[idx : idx + max_length] SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_lowerCamelCase , self.mel_filters_slaney )[None, :] elif truncation == "fusion": SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_lowerCamelCase , self.mel_filters ) SCREAMING_SNAKE_CASE_ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed SCREAMING_SNAKE_CASE_ = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. SCREAMING_SNAKE_CASE_ = np.stack([mel, mel, mel, mel] , axis=0 ) SCREAMING_SNAKE_CASE_ = False else: SCREAMING_SNAKE_CASE_ = self._random_mel_fusion(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE_ = True else: raise NotImplementedError(F'''data_truncating {truncation} not implemented''' ) else: SCREAMING_SNAKE_CASE_ = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": SCREAMING_SNAKE_CASE_ = int(max_length / len(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE_ = np.stack(np.tile(_lowerCamelCase , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": SCREAMING_SNAKE_CASE_ = int(max_length / len(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE_ = np.stack(np.tile(_lowerCamelCase , _lowerCamelCase ) ) SCREAMING_SNAKE_CASE_ = np.pad(_lowerCamelCase , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 ) if truncation == "fusion": SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_lowerCamelCase , self.mel_filters ) SCREAMING_SNAKE_CASE_ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_lowerCamelCase , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , **_A , ) -> BatchFeature: SCREAMING_SNAKE_CASE_ = truncation if truncation is not None else self.truncation SCREAMING_SNAKE_CASE_ = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) SCREAMING_SNAKE_CASE_ = isinstance(_lowerCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) SCREAMING_SNAKE_CASE_ = is_batched_numpy or ( isinstance(_lowerCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: SCREAMING_SNAKE_CASE_ = [np.asarray(_lowerCamelCase , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(_lowerCamelCase , np.ndarray ): SCREAMING_SNAKE_CASE_ = np.asarray(_lowerCamelCase , dtype=np.floataa ) elif isinstance(_lowerCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): SCREAMING_SNAKE_CASE_ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: SCREAMING_SNAKE_CASE_ = [np.asarray(_lowerCamelCase )] # convert to mel spectrogram, truncate and pad if needed. SCREAMING_SNAKE_CASE_ = [ self._get_input_mel(_lowerCamelCase , max_length if max_length else self.nb_max_samples , _lowerCamelCase , _lowerCamelCase ) for waveform in raw_speech ] SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [] for mel, longer in padded_inputs: input_mel.append(_lowerCamelCase ) is_longer.append(_lowerCamelCase ) if truncation == "fusion" and sum(_lowerCamelCase ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer SCREAMING_SNAKE_CASE_ = np.random.randint(0 , len(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE_ = True if isinstance(input_mel[0] , _lowerCamelCase ): SCREAMING_SNAKE_CASE_ = [np.asarray(_lowerCamelCase , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool SCREAMING_SNAKE_CASE_ = [[longer] for longer in is_longer] SCREAMING_SNAKE_CASE_ = {'''input_features''': input_mel, '''is_longer''': is_longer} SCREAMING_SNAKE_CASE_ = BatchFeature(_lowerCamelCase ) if return_tensors is not None: SCREAMING_SNAKE_CASE_ = input_features.convert_to_tensors(_lowerCamelCase ) return input_features
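The repeat/repeatpad branches lengthen clips shorter than max_length by tiling the waveform before the usual zero padding. A small numpy illustration of repeatpad with a 3-sample waveform and max_length = 8:

import numpy as np

waveform = np.array([1.0, 2.0, 3.0])
max_length = 8

n_repeat = int(max_length / len(waveform))  # 2
tiled = np.tile(waveform, n_repeat)         # [1. 2. 3. 1. 2. 3.]
padded = np.pad(tiled, (0, max_length - tiled.shape[0]), mode="constant", constant_values=0)
# padded -> [1. 2. 3. 1. 2. 3. 0. 0.]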
299
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a__ : Optional[Any] = logging.get_logger(__name__) a__ : List[str] = { '''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''', '''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''', '''kssteven/ibert-roberta-large-mnli''': ( '''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json''' ), } class a_ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = 'ibert' def __init__( self , _lowerCamelCase=3_0522 , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=2 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1e-12 , _lowerCamelCase=1 , _lowerCamelCase=0 , _lowerCamelCase=2 , _lowerCamelCase="absolute" , _lowerCamelCase=False , _lowerCamelCase="none" , **_lowerCamelCase , ) ->Any: super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size SCREAMING_SNAKE_CASE : str = hidden_size SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads SCREAMING_SNAKE_CASE : Dict = hidden_act SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings SCREAMING_SNAKE_CASE : Tuple = type_vocab_size SCREAMING_SNAKE_CASE : Optional[int] = initializer_range SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps SCREAMING_SNAKE_CASE : str = position_embedding_type SCREAMING_SNAKE_CASE : Optional[int] = quant_mode SCREAMING_SNAKE_CASE : Dict = force_dequant class a_ ( a__ ): """simple docstring""" @property def __lowerCAmelCase ( self ) ->Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": SCREAMING_SNAKE_CASE : Dict = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
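A quick instantiation sketch, assuming an installed transformers package: quant_mode switches the model to integer-only inference, while force_dequant can exempt chosen nonlinear ops from quantization.

from transformers import IBertConfig

config = IBertConfig(quant_mode=True, force_dequant="none")
print(config.quant_mode, config.force_dequant)  # True none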
313
0
'''simple docstring''' def __UpperCamelCase ( UpperCAmelCase ): lowercase__ : Dict = [] lowercase__ : Tuple = [] lowercase__ : str = { '''^''': 3, '''*''': 2, '''/''': 2, '''%''': 2, '''+''': 1, '''-''': 1, } # Priority of each operator lowercase__ : Optional[int] = len(a__ ) if (len(a__ ) > 7) else 7 # Print table header for output print( '''Symbol'''.center(8 ) , '''Stack'''.center(a__ ) , '''Postfix'''.center(a__ ) , sep=''' | ''' , ) print('''-''' * (print_width * 3 + 7) ) for x in infix: if x.isalpha() or x.isdigit(): post_fix.append(a__ ) # if x is Alphabet / Digit, add it to Postfix elif x == "(": stack.append(a__ ) # if x is "(" push to Stack elif x == ")": # if x is ")" pop stack until "(" is encountered while stack[-1] != "(": post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix stack.pop() else: if len(a__ ) == 0: stack.append(a__ ) # If stack is empty, push x to stack else: # while priority of x is not > priority of element in the stack while len(a__ ) > 0 and priority[x] <= priority[stack[-1]]: post_fix.append(stack.pop() ) # pop stack & add to Postfix stack.append(a__ ) # push x to stack print( x.center(8 ) , (''''''.join(a__ )).ljust(a__ ) , (''''''.join(a__ )).ljust(a__ ) , sep=''' | ''' , ) # Output in tabular format while len(a__ ) > 0: # while stack is not empty post_fix.append(stack.pop() ) # pop stack & add to Postfix print( ''' '''.center(8 ) , (''''''.join(a__ )).ljust(a__ ) , (''''''.join(a__ )).ljust(a__ ) , sep=''' | ''' , ) # Output in tabular format return "".join(a__ ) # return Postfix as str def __UpperCamelCase ( UpperCAmelCase ): lowercase__ : int = list(infix[::-1] ) # reverse the infix equation for i in range(len(a__ ) ): if infix[i] == "(": lowercase__ : List[Any] = ''')''' # change "(" to ")" elif infix[i] == ")": lowercase__ : int = '''(''' # change ")" to "(" return (infix_2_postfix(''''''.join(a__ ) ))[ ::-1 ] # call infix_2_postfix on Infix, return reverse of Postfix if __name__ == "__main__": __a: Optional[Any] = input("""\nEnter an Infix Equation = """) # Input an Infix equation __a: Tuple = ''''''.join(Infix.split()) # Remove spaces from the input print("""\n\t""", Infix, """(Infix) -> """, infix_2_prefix(Infix), """(Prefix)""")
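A quick check of the conversion, with the two functions above in scope: infix_2_prefix reverses the input (swapping the parentheses), converts that to postfix, then reverses the postfix string.

# "a+b*c" reversed is "c*b+a"; its postfix is "cb*a+"; reversed again gives the prefix form
assert infix_2_prefix("a+b*c") == "+a*bc"  # the table printed along the way is a side effect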
198
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType a__ : Any = logging.get_logger(__name__) a__ : Dict = { '''openai/imagegpt-small''': '''''', '''openai/imagegpt-medium''': '''''', '''openai/imagegpt-large''': '''''', } class a_ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = 'imagegpt' __SCREAMING_SNAKE_CASE : Optional[Any] = ['past_key_values'] __SCREAMING_SNAKE_CASE : Union[str, Any] = { 'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , _lowerCamelCase=512 + 1 , _lowerCamelCase=32 * 32 , _lowerCamelCase=512 , _lowerCamelCase=24 , _lowerCamelCase=8 , _lowerCamelCase=None , _lowerCamelCase="quick_gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=1e-5 , _lowerCamelCase=0.0_2 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=False , **_lowerCamelCase , ) ->str: SCREAMING_SNAKE_CASE : Any = vocab_size SCREAMING_SNAKE_CASE : Optional[int] = n_positions SCREAMING_SNAKE_CASE : Optional[int] = n_embd SCREAMING_SNAKE_CASE : List[Any] = n_layer SCREAMING_SNAKE_CASE : List[Any] = n_head SCREAMING_SNAKE_CASE : int = n_inner SCREAMING_SNAKE_CASE : Dict = activation_function SCREAMING_SNAKE_CASE : Union[str, Any] = resid_pdrop SCREAMING_SNAKE_CASE : Dict = embd_pdrop SCREAMING_SNAKE_CASE : List[str] = attn_pdrop SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_epsilon SCREAMING_SNAKE_CASE : Tuple = initializer_range SCREAMING_SNAKE_CASE : int = scale_attn_weights SCREAMING_SNAKE_CASE : Optional[int] = use_cache SCREAMING_SNAKE_CASE : Optional[Any] = scale_attn_by_inverse_layer_idx SCREAMING_SNAKE_CASE : str = reorder_and_upcast_attn SCREAMING_SNAKE_CASE : List[str] = tie_word_embeddings super().__init__(tie_word_embeddings=_lowerCamelCase , **_lowerCamelCase ) class a_ ( a__ ): """simple docstring""" @property def __lowerCAmelCase ( self ) ->Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''sequence'''}), ] ) def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = 3 , _lowerCamelCase = 32 , _lowerCamelCase = 32 , ) ->Mapping[str, Any]: SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_images(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Tuple = dict(preprocessor(images=_lowerCamelCase , return_tensors=_lowerCamelCase ) ) return inputs
313
0
'''simple docstring''' import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def UpperCAmelCase_ ( __lowerCamelCase : int ): lowercase_ :Any = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(a__ ,a__ ) def UpperCAmelCase_ ( __lowerCamelCase : Union[str, Any] ): lowercase_ :List[str] = emb.weight.shape lowercase_ :str = nn.Linear(a__ ,a__ ,bias=a__ ) lowercase_ :Optional[Any] = emb.weight.data return lin_layer def UpperCAmelCase_ ( __lowerCamelCase : Union[str, Any] ): lowercase_ :List[Any] = torch.load(a__ ,map_location="cpu" ) lowercase_ :Union[str, Any] = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model'''] lowercase_ :Optional[Any] = mam_aaa['''model'''] remove_ignore_keys_(a__ ) lowercase_ :int = state_dict['''encoder.embed_tokens.weight'''].shape[0] lowercase_ :List[str] = MaMaaaConfig( vocab_size=a__ ,max_position_embeddings=10_24 ,encoder_layers=args.encoder_layers ,decoder_layers=args.decoder_layers ,encoder_attention_heads=args.encoder_attention_heads ,decoder_attention_heads=args.decoder_attention_heads ,encoder_ffn_dim=args.encoder_ffn_embed_dim ,decoder_ffn_dim=args.decoder_ffn_embed_dim ,d_model=args.encoder_embed_dim ,encoder_layerdrop=args.encoder_layerdrop ,decoder_layerdrop=args.decoder_layerdrop ,dropout=args.dropout ,attention_dropout=args.attention_dropout ,activation_dropout=args.activation_dropout ,activation_function="relu" ,) lowercase_ :Optional[int] = state_dict['''decoder.embed_tokens.weight'''] lowercase_ :Dict = MaMaaaForConditionalGeneration(a__ ) model.model.load_state_dict(a__ ,strict=a__ ) lowercase_ :List[str] = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": lowerCAmelCase : Dict =argparse.ArgumentParser() # Required parameters parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''') parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') lowerCAmelCase : Dict =parser.parse_args() lowerCAmelCase : Union[str, Any] =convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
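End to end the script only does two things: build a MaMaaaForConditionalGeneration from the fairseq checkpoint and save it. The same calls outside argparse, with placeholder paths:

model = convert_fairseq_mamaaa_checkpoint_from_disk("m2m100_418M/model.pt")  # hypothetical local path
model.save_pretrained("m2m100-converted")  # hypothetical output dir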
223
from maths.prime_check import is_prime def UpperCAmelCase_( a__ ): """simple docstring""" if not isinstance(a__ , a__ ): SCREAMING_SNAKE_CASE : List[Any] = F"""Input value of [number={number}] must be an integer""" raise TypeError(a__ ) if is_prime(a__ ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
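With readable names the helper reduces to: return number + 2 when both number and number + 2 are prime, otherwise -1. A minimal sketch using the same is_prime import:

from maths.prime_check import is_prime

def twin_prime(number: int) -> int:
    """Return number + 2 if (number, number + 2) is a twin prime pair, else -1."""
    if not isinstance(number, int):
        raise TypeError(f"Input value of [number={number}] must be an integer")
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    return -1

assert twin_prime(3) == 5   # (3, 5) are twin primes
assert twin_prime(4) == -1  # 4 is not prime
assert twin_prime(5) == 7   # (5, 7) are twin primes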
313
0
"""simple docstring""" def lowercase__ ( _UpperCAmelCase ) -> str: '''simple docstring''' lowercase : List[Any] = 1 lowercase : Any = 2 while i * i <= n: lowercase : str = 0 while n % i == 0: n //= i multiplicity += 1 n_divisors *= multiplicity + 1 i += 1 if n > 1: n_divisors *= 2 return n_divisors def lowercase__ ( ) -> str: '''simple docstring''' lowercase : Any = 1 lowercase : List[Any] = 1 while True: i += 1 t_num += i if count_divisors(a__ ) > 5_00: break return t_num if __name__ == "__main__": print(solution())
255
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaPriorEmbaEmbPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class a_ ( a__ , unittest.TestCase ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = KandinskyVaaControlnetImgaImgPipeline __SCREAMING_SNAKE_CASE : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint'] __SCREAMING_SNAKE_CASE : List[Any] = ['image_embeds', 'negative_image_embeds', 'image', 'hint'] __SCREAMING_SNAKE_CASE : List[str] = [ 'generator', 'height', 'width', 'strength', 'guidance_scale', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] __SCREAMING_SNAKE_CASE : List[Any] = False @property def __lowerCAmelCase ( self ) ->Optional[Any]: return 32 @property def __lowerCAmelCase ( self ) ->Optional[int]: return 32 @property def __lowerCAmelCase ( self ) ->str: return self.time_input_dim @property def __lowerCAmelCase ( self ) ->Dict: return self.time_input_dim * 4 @property def __lowerCAmelCase ( self ) ->Tuple: return 100 @property def __lowerCAmelCase ( self ) ->int: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Union[str, Any] = { '''in_channels''': 8, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image_hint''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } SCREAMING_SNAKE_CASE : List[str] = UNetaDConditionModel(**_lowerCamelCase ) return model @property def __lowerCAmelCase ( self ) ->Any: return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __lowerCAmelCase ( self ) ->Tuple: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[int] = VQModel(**self.dummy_movq_kwargs ) return model def __lowerCAmelCase ( self ) ->List[str]: SCREAMING_SNAKE_CASE : str = self.dummy_unet SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_movq SCREAMING_SNAKE_CASE : List[str] = { '''num_train_timesteps''': 1000, '''beta_schedule''': '''linear''', '''beta_start''': 0.0_0_0_8_5, '''beta_end''': 0.0_1_2, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', '''thresholding''': False, } 
SCREAMING_SNAKE_CASE : str = DDIMScheduler(**_lowerCamelCase ) SCREAMING_SNAKE_CASE : str = { '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->int: SCREAMING_SNAKE_CASE : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( _lowerCamelCase ) # create init_image SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) SCREAMING_SNAKE_CASE : int = image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE : Dict = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) ) # create hint SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) if str(_lowerCamelCase ).startswith('''mps''' ): SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(_lowerCamelCase ) else: SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Dict = { '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''hint''': hint, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def __lowerCAmelCase ( self ) ->Dict: SCREAMING_SNAKE_CASE : Optional[int] = '''cpu''' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**_lowerCamelCase ) SCREAMING_SNAKE_CASE : int = pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : str = output.images SCREAMING_SNAKE_CASE : Any = pipe( **self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0] SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) SCREAMING_SNAKE_CASE : Union[str, Any] = np.array( [0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class a_ ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self ) ->Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) ->Optional[int]: SCREAMING_SNAKE_CASE : int = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' ) SCREAMING_SNAKE_CASE : int = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) SCREAMING_SNAKE_CASE : Optional[Any] = init_image.resize((512, 512) ) 
SCREAMING_SNAKE_CASE : Dict = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/hint_image_cat.png''' ) SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(np.array(_lowerCamelCase ) ).float() / 2_5_5.0 SCREAMING_SNAKE_CASE : int = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) SCREAMING_SNAKE_CASE : List[Any] = '''A robot, 4k photo''' SCREAMING_SNAKE_CASE : List[str] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(_lowerCamelCase ) SCREAMING_SNAKE_CASE : Tuple = KandinskyVaaControlnetImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : Any = pipeline.to(_lowerCamelCase ) pipeline.set_progress_bar_config(disable=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_prior( _lowerCamelCase , image=_lowerCamelCase , strength=0.8_5 , generator=_lowerCamelCase , negative_prompt='''''' , ).to_tuple() SCREAMING_SNAKE_CASE : List[str] = pipeline( image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , hint=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , ) SCREAMING_SNAKE_CASE : Any = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
313
0
"""simple docstring""" import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowercase = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png''' lowercase = Image.open(requests.get(a__ , stream=a__ ).raw ).convert('RGB' ) return image def _SCREAMING_SNAKE_CASE ( __snake_case : List[Any] ): '''simple docstring''' lowercase = [] # fmt: off # vision encoder rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') ) rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') ) rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') ) rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') ) rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') ) rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') ) rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') ) rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') ) rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') ) rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') ) rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) ) rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') ) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') ) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') ) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') ) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') ) # QFormer rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') ) rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') ) # fmt: on return rename_keys def _SCREAMING_SNAKE_CASE ( __snake_case : Any , __snake_case : Any , __snake_case : Union[str, Any] ): '''simple docstring''' lowercase = dct.pop(a__ ) lowercase = val def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[int] , __snake_case : str ): '''simple docstring''' for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases lowercase = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias' ) lowercase = 
state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias' ) # next, set bias in the state dict lowercase = torch.cat((q_bias, torch.zeros_like(a__ , requires_grad=a__ ), v_bias) ) lowercase = qkv_bias def _SCREAMING_SNAKE_CASE ( __snake_case : Dict , __snake_case : List[str] ): '''simple docstring''' lowercase = 3_64 if '''coco''' in model_name else 2_24 lowercase = BlipaVisionConfig(image_size=a__ ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: lowercase = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=a__ ).to_dict() elif "opt-6.7b" in model_name: lowercase = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=a__ ).to_dict() elif "t5-xl" in model_name: lowercase = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: lowercase = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() lowercase = BlipaConfig(vision_config=a__ , text_config=a__ ) return config, image_size @torch.no_grad() def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[int] , __snake_case : Any=None , __snake_case : Tuple=False ): '''simple docstring''' lowercase = ( AutoTokenizer.from_pretrained('facebook/opt-2.7b' ) if '''opt''' in model_name else AutoTokenizer.from_pretrained('google/flan-t5-xl' ) ) lowercase = tokenizer('\n' , add_special_tokens=a__ ).input_ids[0] lowercase = get_blipa_config(a__ , eos_token_id=a__ ) lowercase = BlipaForConditionalGeneration(a__ ).eval() lowercase = { '''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''), '''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''), '''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''), '''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''), '''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''), '''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''), '''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''), } lowercase = model_name_to_original[model_name] # load original model print('Loading original model...' ) lowercase = '''cuda''' if torch.cuda.is_available() else '''cpu''' lowercase = load_model_and_preprocess( name=a__ , model_type=a__ , is_eval=a__ , device=a__ ) original_model.eval() print('Done!' 
) # update state dict keys lowercase = original_model.state_dict() lowercase = create_rename_keys(a__ ) for src, dest in rename_keys: rename_key(a__ , a__ , a__ ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): lowercase = state_dict.pop(a__ ) if key.startswith('Qformer.bert' ): lowercase = key.replace('Qformer.bert' , 'qformer' ) if "attention.self" in key: lowercase = key.replace('self' , 'attention' ) if "opt_proj" in key: lowercase = key.replace('opt_proj' , 'language_projection' ) if "t5_proj" in key: lowercase = key.replace('t5_proj' , 'language_projection' ) if key.startswith('opt' ): lowercase = key.replace('opt' , 'language' ) if key.startswith('t5' ): lowercase = key.replace('t5' , 'language' ) lowercase = val # read in qv biases read_in_q_v_bias(a__ , a__ ) lowercase = hf_model.load_state_dict(a__ , strict=a__ ) assert len(a__ ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] lowercase = load_demo_image() lowercase = vis_processors['''eval'''](a__ ).unsqueeze(0 ).to(a__ ) lowercase = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(a__ ) # create processor lowercase = BlipImageProcessor( size={'height': image_size, 'width': image_size} , image_mean=a__ , image_std=a__ ) lowercase = BlipaProcessor(image_processor=a__ , tokenizer=a__ ) lowercase = processor(images=a__ , return_tensors='pt' ).pixel_values.to(a__ ) # make sure processor creates exact same pixel values assert torch.allclose(a__ , a__ ) original_model.to(a__ ) hf_model.to(a__ ) with torch.no_grad(): if "opt" in model_name: lowercase = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits lowercase = hf_model(a__ , a__ ).logits else: lowercase = original_model( {'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits lowercase = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 ) lowercase = hf_model(a__ , a__ , labels=a__ ).logits assert original_logits.shape == logits.shape print('First values of original logits:' , original_logits[0, :3, :3] ) print('First values of HF logits:' , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": lowercase = torch.tensor( [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=a__ ) assert torch.allclose(logits[0, :3, :3] , a__ , atol=1e-4 ) elif model_name == "blip2-flan-t5-xl-coco": lowercase = torch.tensor( [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=a__ ) else: # cast to same type lowercase = logits.dtype assert torch.allclose(original_logits.to(a__ ) , a__ , atol=1e-2 ) print('Looks ok!' ) print('Generating a caption...' 
) lowercase = '''''' lowercase = tokenizer(a__ , return_tensors='pt' ).input_ids.to(a__ ) lowercase = original_model.generate({'image': original_pixel_values} ) lowercase = hf_model.generate( a__ , a__ , do_sample=a__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print('Original generation:' , a__ ) lowercase = input_ids.shape[1] lowercase = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=a__ ) lowercase = [text.strip() for text in output_text] print('HF generation:' , a__ ) if pytorch_dump_folder_path is not None: processor.save_pretrained(a__ ) hf_model.save_pretrained(a__ ) if push_to_hub: processor.push_to_hub(f'nielsr/{model_name}' ) hf_model.push_to_hub(f'nielsr/{model_name}' ) if __name__ == "__main__": _UpperCamelCase : Union[str, Any] = argparse.ArgumentParser() _UpperCamelCase : List[str] = [ '''blip2-opt-2.7b''', '''blip2-opt-6.7b''', '''blip2-opt-2.7b-coco''', '''blip2-opt-6.7b-coco''', '''blip2-flan-t5-xl''', '''blip2-flan-t5-xl-coco''', '''blip2-flan-t5-xxl''', ] parser.add_argument( '--model_name', default='blip2-opt-2.7b', choices=choices, type=str, help='Path to hf config.json of model to convert', ) parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub after converting', ) _UpperCamelCase : List[Any] = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
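# A minimal usage sketch, assuming this script is saved as
# convert_blip_2_original_to_pytorch.py (the module name is an assumption) and that
# salesforce-lavis is installed as noted at the top of the file:
#
#     python convert_blip_2_original_to_pytorch.py \
#         --model_name blip2-opt-2.7b \
#         --pytorch_dump_folder_path ./blip2-opt-2.7b
#
# or, equivalently, from Python:
#
#     from convert_blip_2_original_to_pytorch import convert_blip2_checkpoint
#     convert_blip2_checkpoint("blip2-opt-2.7b", pytorch_dump_folder_path="./blip2-opt-2.7b")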
from typing import Any, Callable, Dict, List, Optional, Union

import torch

from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker


pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"


class StableDiffusionComparisonPipeline(DiffusionPipeline):
    r"""Pipeline for comparing the outputs of Stable Diffusion v1.1 through v1.4 on the same prompt."""

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(
        self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50,
        guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs,
    ):
        return self.pipe1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_2(
        self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50,
        guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs,
    ):
        return self.pipe2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_3(
        self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50,
        guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs,
    ):
        return self.pipe3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_4(
        self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50,
        guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs,
    ):
        return self.pipe4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def _compare(
        self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50,
        guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs,
    ):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
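# A minimal usage sketch, assuming the four CompVis checkpoints are downloadable and
# the components of the v1-4 pipeline are reused for the fourth sub-pipeline;
# `_compare` is the comparison entry point defined above:
#
#     base = StableDiffusionPipeline.from_pretrained(pipe4_model_id)
#     pipe = StableDiffusionComparisonPipeline(
#         vae=base.vae, text_encoder=base.text_encoder, tokenizer=base.tokenizer,
#         unet=base.unet, scheduler=base.scheduler, safety_checker=base.safety_checker,
#         feature_extractor=base.feature_extractor,
#     )
#     result = pipe._compare("a photograph of an astronaut riding a horse")
#     images = result.images  # one image per checkpoint, v1.1 through v1.4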
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.test_utils import execute_subprocess_async


def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
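# A minimal usage sketch (the subcommand simply launches the bundled smoke-test
# script through `accelerate-launch`). From the shell:
#
#     accelerate test --config_file path/to/config.yaml
#
# or, equivalently, from Python:
#
#     parser = test_command_parser()
#     test_command(parser.parse_args(["--config_file", "path/to/config.yaml"]))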
from typing import Optional, Tuple, Union

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict

from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)


@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False

    def init_weights(self, rng) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads`"
                " because of a naming issue as described in"
                " https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing"
                " `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in
        # https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1],
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            use_memory_efficient_attention=self.use_memory_efficient_attention,
            dtype=self.dtype,
        )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    num_attention_heads=reversed_num_attention_heads[i],
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    dtype=self.dtype,
                )

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxUNet2DConditionOutput, Tuple]:
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample,
                    temb=t_emb,
                    encoder_hidden_states=encoder_hidden_states,
                    res_hidden_states_tuple=res_samples,
                    deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
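# A minimal initialization sketch, assuming it runs inside the diffusers package so the
# relative imports above resolve; shapes follow init_weights:
#
#     import jax
#     import jax.numpy as jnp
#
#     unet = FlaxUNet2DConditionModel(sample_size=32)
#     params = unet.init_weights(rng=jax.random.PRNGKey(0))
#     sample = jnp.zeros((1, unet.in_channels, unet.sample_size, unet.sample_size))
#     timesteps = jnp.ones((1,), dtype=jnp.int32)
#     encoder_hidden_states = jnp.zeros((1, 1, unet.cross_attention_dim))
#     out = unet.apply({"params": params}, sample, timesteps, encoder_hidden_states)
#     print(out.sample.shape)  # (1, 4, 32, 32)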
from __future__ import annotations


def p_series(nth_term: int | str, power: int | str) -> list[str]:
    """Return the first nth_term terms of the P-Series 1 + 1/2^p + 1/3^p + ... + 1/n^p as strings."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(nth_term):
        series.append(f"1 / {pow(temp + 1, power)}" if series else "1")
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
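# Example: the first five terms of the P-Series with power 2:
#
#     >>> p_series(5, 2)
#     ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']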
import gc
import random
import unittest

import numpy as np
import torch

from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModel,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    floats_tensor,
    load_image,
    load_numpy,
    require_torch_gpu,
    skip_mps,
    slow,
    torch_device,
)

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)


enable_full_determinism()


class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                num_hidden_layers=5,
                num_attention_heads=4,
                image_size=32,
                intermediate_size=37,
                patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }

        return components

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]

        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]

        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)


@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            input_image,
            "anime turtle",
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
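# A minimal way to run just the fast tests above (the file path is an assumption about
# where this module lives in the diffusers repository):
#
#     python -m pytest tests/pipelines/stable_unclip/test_stable_unclip_img2img.py -k "FastTests"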
"""Project Euler Problem 174: https://projecteuler.net/problem=174

Count how many tile totals t <= 1,000,000 form between one and ten distinct hollow
square laminae.
"""

from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    count: defaultdict = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1

        # the hole width must have the same parity as the outer width
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{solution() = }")
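# Worked example of the identity behind the loops above: a lamina with outer width o and
# a centered hole of width h (same parity as o, h <= o - 2) uses o*o - h*h tiles.
# For o = 3, h = 1 gives 9 - 1 = 8 tiles, the smallest lamina; t = 32 can be formed both
# as o = 6, h = 2 (36 - 4) and as o = 9, h = 7 (81 - 49), which is why t = 32 is type L(2).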
from abc import ABC, abstractmethod
from typing import List, Optional


class Constraint(ABC):
    r"""Abstract base class for constraints applied during constrained generation. A subclass must make
    incremental progress via update() and eventually complete, which test() verifies at construction time."""

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )

            stepped, completed, reset = self.update(advance)
            counter += 1

            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")

        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class PhrasalConstraint(Constraint):
    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        if self.completed:
            return False

        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint


class DisjunctiveTrie:
    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )

        self.trie = root

    def next_tokens(self, current_seq):
        start = self.trie

        for current_token in current_seq:
            start = start[current_token]

        next_tokens = list(start.keys())

        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)

        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count


class DisjunctiveConstraint(Constraint):
    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        next_tokens = self.trie.next_tokens(self.current_seq)

        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed

        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint


class ConstraintListState:
    r"""Tracks a beam's progress through a list of constraints."""

    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()

        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None

            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #    inprogress to None. If there are no pending constraints either, then this full list of constraints
                #    is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None

                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True

        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our
            # list of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)

                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )

                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None

                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint

                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        # we never actually mutate self.constraints throughout this process,
        # so the new state starts from the initialization state.
        new_state = ConstraintListState(self.constraints)

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
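# A minimal usage sketch of the stepping protocol (the token ids are made up for
# illustration):
#
#     constraint = PhrasalConstraint([5, 9, 2])
#     stepped, completed, reset = constraint.update(5)  # (True, False, False)
#     stepped, completed, reset = constraint.update(9)  # (True, False, False)
#     stepped, completed, reset = constraint.update(2)  # (True, True, False)
#
#     disjunctive = DisjunctiveConstraint([[1, 2, 3], [1, 4]])
#     disjunctive.advance()  # [1]: both branches start with token 1
#     disjunctive.update(1)
#     disjunctive.advance()  # [2, 4]: either branch can continue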