The dataset has five columns; for the string columns the min/max values are string lengths, for the int64 columns they are the value range:

column                    type     min    max
code                      string   82     54.1k
code_codestyle            int64    0      699
style_context             string   111    35.6k
style_context_codestyle   int64    0      699
label                     int64    0      1
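Each row pairs a complete Python source file (`code`) with a second source file (`style_context`), along with integer style-class ids and a binary label. As a minimal sketch of how one row of such a dataset could be inspected — assuming it is published on the Hugging Face Hub and the `datasets` library is installed; the dataset id below is a placeholder, not the real one:

# Minimal sketch, not the canonical loader: "org/code-style-pairs" is a
# placeholder dataset id standing in for wherever this dump actually lives.
from datasets import load_dataset

ds = load_dataset("org/code-style-pairs", split="train")

row = ds[0]
# The int64 columns are small integers, per the schema above.
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
# The string columns each hold an entire Python source file.
print(row["code"][:120])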
def solution(limit: int = 1000000) -> int:
    # Count the values of n below `limit` for which exactly ten arithmetic
    # progressions (a - d, a, a + d) of positive integers satisfy the equation.
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(f"{solution() = }")
code_codestyle: 78
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The number of nodes should be equal to the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves + right_distrib_moves + abs(coins_to_left) + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 78
label: 1
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool


class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
code_codestyle: 78
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock

import torch
from accelerate.utils import write_basic_config

from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --seed=42
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --num_train_epochs=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))

    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --seed=42
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/swag/sample.json
            --validation_file tests/fixtures/tests_samples/swag/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=20
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
            --model_name_or_path sshleifer/student_marian_en_ro_6_1
            --source_lang en
            --target_lang ro
            --train_file tests/fixtures/tests_samples/wmt16/sample.json
            --validation_file tests/fixtures/tests_samples/wmt16/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --num_beams=6
            --learning_rate=3e-3
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --source_lang en_XX
            --target_lang ro_RO
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))

    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
            --dataset_name huggingface/semantic-segmentation-test-sample
            --output_dir {tmp_dir}
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
            --model_name_or_path google/vit-base-patch16-224-in21k
            --dataset_name hf-internal-testing/cats_vs_dogs_sample
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --max_train_steps 2
            --train_val_split 0.1
            --seed 42
            --output_dir {tmp_dir}
            --with_tracking
            --checkpointing_steps 1
        """.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
style_context_codestyle: 78
label: 1
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch

from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch


@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # PyTorch in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
code_codestyle: 78
import builtins
import sys

from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP


in_colab = False

try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass


@input.register
class BulletMenu:
    # A CLI menu to select one choice from a list, driven by the keyboard.
    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
style_context_codestyle: 78
label: 1
import argparse

import torch

from transformers import YosoConfig, YosoForMaskedLM


def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
code_codestyle: 78
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
style_context_codestyle: 78
label: 1
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
code_codestyle: 78
'''simple docstring''' import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed SCREAMING_SNAKE_CASE_: Any ={ 'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), 'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), 'bert': (BertConfig, BertForMaskedLM, BertTokenizer), 'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def lowerCAmelCase_ ( snake_case_ : Any ) -> str: '''simple docstring''' assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' if args.student_type == "roberta": UpperCAmelCase_ = False elif args.student_type == "gpt2": UpperCAmelCase_ = False def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : List[Any] ) -> Tuple: '''simple docstring''' if args.student_type == "roberta": UpperCAmelCase_ = False def lowerCAmelCase_ ( ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ = argparse.ArgumentParser(description="Training" ) parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." ) parser.add_argument( "--dump_path" , type=snake_case_ , required=snake_case_ , help="The output directory (log, checkpoints, parameters, etc.)" ) parser.add_argument( "--data_file" , type=snake_case_ , required=snake_case_ , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , ) parser.add_argument( "--student_type" , type=snake_case_ , choices=["distilbert", "roberta", "gpt2"] , required=snake_case_ , help="The student type (DistilBERT, RoBERTa)." , ) parser.add_argument("--student_config" , type=snake_case_ , required=snake_case_ , help="Path to the student configuration." ) parser.add_argument( "--student_pretrained_weights" , default=snake_case_ , type=snake_case_ , help="Load student initialization checkpoint." ) parser.add_argument( "--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=snake_case_ , help="Teacher type (BERT, RoBERTa)." ) parser.add_argument("--teacher_name" , type=snake_case_ , required=snake_case_ , help="The teacher model." 
) parser.add_argument("--temperature" , default=2.0 , type=snake_case_ , help="Temperature for the softmax temperature." ) parser.add_argument( "--alpha_ce" , default=0.5 , type=snake_case_ , help="Linear weight for the distillation loss. Must be >=0." ) parser.add_argument( "--alpha_mlm" , default=0.0 , type=snake_case_ , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , ) parser.add_argument("--alpha_clm" , default=0.5 , type=snake_case_ , help="Linear weight for the CLM loss. Must be >=0." ) parser.add_argument("--alpha_mse" , default=0.0 , type=snake_case_ , help="Linear weight of the MSE loss. Must be >=0." ) parser.add_argument( "--alpha_cos" , default=0.0 , type=snake_case_ , help="Linear weight of the cosine embedding loss. Must be >=0." ) parser.add_argument( "--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." ) parser.add_argument( "--mlm_mask_prop" , default=0.15 , type=snake_case_ , help="Proportion of tokens for which we need to make a prediction." , ) parser.add_argument("--word_mask" , default=0.8 , type=snake_case_ , help="Proportion of tokens to mask out." ) parser.add_argument("--word_keep" , default=0.1 , type=snake_case_ , help="Proportion of tokens to keep." ) parser.add_argument("--word_rand" , default=0.1 , type=snake_case_ , help="Proportion of tokens to randomly replace." ) parser.add_argument( "--mlm_smoothing" , default=0.7 , type=snake_case_ , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , ) parser.add_argument("--token_counts" , type=snake_case_ , help="The token counts in the data_file for MLM." ) parser.add_argument( "--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , ) parser.add_argument( "--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , ) parser.add_argument( "--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , ) parser.add_argument("--n_epoch" , type=snake_case_ , default=3 , help="Number of pass on the whole dataset." ) parser.add_argument("--batch_size" , type=snake_case_ , default=5 , help="Batch size (for each process)." ) parser.add_argument( "--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , ) parser.add_argument( "--gradient_accumulation_steps" , type=snake_case_ , default=50 , help="Gradient accumulation for larger training batches." , ) parser.add_argument("--warmup_prop" , default=0.05 , type=snake_case_ , help="Linear warmup proportion." ) parser.add_argument("--weight_decay" , default=0.0 , type=snake_case_ , help="Weight decay if we apply some." ) parser.add_argument("--learning_rate" , default=5E-4 , type=snake_case_ , help="The initial learning rate for Adam." ) parser.add_argument("--adam_epsilon" , default=1E-6 , type=snake_case_ , help="Epsilon for Adam optimizer." ) parser.add_argument("--max_grad_norm" , default=5.0 , type=snake_case_ , help="Max gradient norm." ) parser.add_argument("--initializer_range" , default=0.02 , type=snake_case_ , help="Random initialization range." 
) parser.add_argument( "--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , ) parser.add_argument( "--fp16_opt_level" , type=snake_case_ , default="O1" , help=( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html" ) , ) parser.add_argument("--n_gpu" , type=snake_case_ , default=1 , help="Number of GPUs in the node." ) parser.add_argument("--local_rank" , type=snake_case_ , default=-1 , help="Distributed training - Local rank" ) parser.add_argument("--seed" , type=snake_case_ , default=56 , help="Random seed" ) parser.add_argument("--log_interval" , type=snake_case_ , default=5_00 , help="Tensorboard logging interval." ) parser.add_argument("--checkpoint_interval" , type=snake_case_ , default=40_00 , help="Checkpoint interval." ) UpperCAmelCase_ = parser.parse_args() sanity_checks(snake_case_ ) # ARGS # init_gpu_params(snake_case_ ) set_seed(snake_case_ ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite""" " itUse `--force` if you want to overwrite it" ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" ) # SAVE PARAMS # logger.info(f"""Param: {args}""" ) with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f: json.dump(vars(snake_case_ ) , snake_case_ , indent=4 ) git_log(args.dump_path ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = MODEL_CLASSES[args.student_type] UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = MODEL_CLASSES[args.teacher_type] # TOKENIZER # UpperCAmelCase_ = teacher_tokenizer_class.from_pretrained(args.teacher_name ) UpperCAmelCase_ = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): UpperCAmelCase_ = tokenizer.all_special_tokens.index(snake_case_ ) UpperCAmelCase_ = tokenizer.all_special_ids[idx] logger.info(f"""Special tokens {special_tok_ids}""" ) UpperCAmelCase_ = special_tok_ids UpperCAmelCase_ = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f"""Loading data from {args.data_file}""" ) with open(args.data_file , "rb" ) as fp: UpperCAmelCase_ = pickle.load(snake_case_ ) if args.mlm: logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" ) with open(args.token_counts , "rb" ) as fp: UpperCAmelCase_ = pickle.load(snake_case_ ) UpperCAmelCase_ = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): UpperCAmelCase_ = 0.0 # do not predict special tokens UpperCAmelCase_ = torch.from_numpy(snake_case_ ) else: UpperCAmelCase_ = None UpperCAmelCase_ = LmSeqsDataset(params=snake_case_ , data=snake_case_ ) logger.info("Data loader created." ) # STUDENT # logger.info(f"""Loading student config from {args.student_config}""" ) UpperCAmelCase_ = student_config_class.from_pretrained(args.student_config ) UpperCAmelCase_ = True if args.student_pretrained_weights is not None: logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" ) UpperCAmelCase_ = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case_ ) else: UpperCAmelCase_ = student_model_class(snake_case_ ) if args.n_gpu > 0: student.to(f"""cuda:{args.local_rank}""" ) logger.info("Student loaded." 
) # TEACHER # UpperCAmelCase_ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case_ ) if args.n_gpu > 0: teacher.to(f"""cuda:{args.local_rank}""" ) logger.info(f"""Teacher loaded from {args.teacher_name}.""" ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(snake_case_ , snake_case_ ) if args.freeze_token_type_embds: freeze_token_type_embeddings(snake_case_ , snake_case_ ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() UpperCAmelCase_ = Distiller( params=snake_case_ , dataset=snake_case_ , token_probs=snake_case_ , student=snake_case_ , teacher=snake_case_ ) distiller.train() logger.info("Let's go get some drinks." ) if __name__ == "__main__": main()
style_context_codestyle: 78
label: 1
(code cell: byte-for-byte duplicate of the style_context cell of the previous row, the DistilBERT distillation training script)
code_codestyle: 78
'''simple docstring''' import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class __A ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): a__ : int = AutoencoderKL a__ : Optional[Any] = """sample""" a__ : Union[str, Any] = 1e-2 @property def _lowercase (self : Optional[int] ): UpperCAmelCase_ = 4 UpperCAmelCase_ = 3 UpperCAmelCase_ = (32, 32) UpperCAmelCase_ = floats_tensor((batch_size, num_channels) + sizes ).to(__a ) return {"sample": image} @property def _lowercase (self : Any ): return (3, 32, 32) @property def _lowercase (self : Dict ): return (3, 32, 32) def _lowercase (self : int ): UpperCAmelCase_ = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } UpperCAmelCase_ = self.dummy_input return init_dict, inputs_dict def _lowercase (self : int ): pass def _lowercase (self : int ): pass @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" ) def _lowercase (self : List[Any] ): # enable deterministic behavior for gradient checkpointing UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_init_args_and_inputs_for_common() UpperCAmelCase_ = self.model_class(**__a ) model.to(__a ) assert not model.is_gradient_checkpointing and model.training UpperCAmelCase_ = model(**__a ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() UpperCAmelCase_ = torch.randn_like(__a ) UpperCAmelCase_ = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing UpperCAmelCase_ = self.model_class(**__a ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(__a ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training UpperCAmelCase_ = model_a(**__a ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() UpperCAmelCase_ = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1E-5 ) UpperCAmelCase_ = dict(model.named_parameters() ) UpperCAmelCase_ = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) ) def _lowercase (self : Any ): UpperCAmelCase_ , UpperCAmelCase_ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=__a ) self.assertIsNotNone(__a ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(__a ) UpperCAmelCase_ = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def _lowercase (self : List[str] ): UpperCAmelCase_ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" ) UpperCAmelCase_ = model.to(__a ) model.eval() if torch_device == "mps": UpperCAmelCase_ = torch.manual_seed(0 ) else: UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 ) UpperCAmelCase_ = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) UpperCAmelCase_ = image.to(__a ) with torch.no_grad(): UpperCAmelCase_ = model(__a , sample_posterior=__a , generator=__a ).sample UpperCAmelCase_ = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": UpperCAmelCase_ = torch.tensor( [ -4.0078E-01, -3.8323E-04, -1.2681E-01, -1.1462E-01, 2.0095E-01, 1.0893E-01, -8.8247E-02, -3.0361E-01, -9.8644E-03, ] ) elif torch_device == "cpu": UpperCAmelCase_ = torch.tensor( [-0.13_52, 0.08_78, 0.04_19, -0.08_18, -0.10_69, 0.06_88, -0.14_58, -0.44_46, -0.00_26] ) else: UpperCAmelCase_ = torch.tensor( [-0.24_21, 0.46_42, 0.25_07, -0.04_38, 0.06_82, 0.31_60, -0.20_18, -0.07_27, 0.24_85] ) self.assertTrue(torch_all_close(__a , __a , rtol=1E-2 ) ) @slow class __A ( unittest.TestCase ): def _lowercase (self : Dict , __a : Dict , __a : int ): return f"""gaussian_noise_s={seed}_shape={"_".join([str(__a ) for s in shape] )}.npy""" def _lowercase (self : str ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase (self : Optional[Any] , __a : Optional[Any]=0 , __a : str=(4, 3, 512, 512) , __a : List[str]=False ): UpperCAmelCase_ = torch.floataa if fpaa else torch.floataa UpperCAmelCase_ = torch.from_numpy(load_hf_numpy(self.get_file_format(__a , __a ) ) ).to(__a ).to(__a ) return image def _lowercase (self : List[Any] , __a : Union[str, Any]="CompVis/stable-diffusion-v1-4" , __a : List[Any]=False ): UpperCAmelCase_ = "fp16" if fpaa else None UpperCAmelCase_ = torch.floataa if fpaa else torch.floataa UpperCAmelCase_ = AutoencoderKL.from_pretrained( __a , subfolder="vae" , torch_dtype=__a , revision=__a , ) model.to(__a ).eval() return model def _lowercase (self : List[Any] , __a : List[Any]=0 ): if torch_device == "mps": return torch.manual_seed(__a ) return torch.Generator(device=__a ).manual_seed(__a ) @parameterized.expand( [ # fmt: off [33, [-0.16_03, 0.98_78, -0.04_95, -0.07_90, -0.27_09, 0.83_75, -0.20_60, -0.08_24], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]], [47, [-0.23_76, 0.11_68, 0.13_32, -0.48_40, -0.25_08, 
-0.07_91, -0.04_93, -0.40_89], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]], # fmt: on ] ) def _lowercase (self : List[Any] , __a : Dict , __a : Optional[int] , __a : List[str] ): UpperCAmelCase_ = self.get_sd_vae_model() UpperCAmelCase_ = self.get_sd_image(__a ) UpperCAmelCase_ = self.get_generator(__a ) with torch.no_grad(): UpperCAmelCase_ = model(__a , generator=__a , sample_posterior=__a ).sample assert sample.shape == image.shape UpperCAmelCase_ = sample[-1, -2:, -2:, :2].flatten().float().cpu() UpperCAmelCase_ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(__a , __a , atol=3E-3 ) @parameterized.expand( [ # fmt: off [33, [-0.05_13, 0.02_89, 1.37_99, 0.21_66, -0.25_73, -0.08_71, 0.51_03, -0.09_99]], [47, [-0.41_28, -0.13_20, -0.37_04, 0.19_65, -0.41_16, -0.23_32, -0.33_40, 0.22_47]], # fmt: on ] ) @require_torch_gpu def _lowercase (self : Dict , __a : Optional[int] , __a : int ): UpperCAmelCase_ = self.get_sd_vae_model(fpaa=__a ) UpperCAmelCase_ = self.get_sd_image(__a , fpaa=__a ) UpperCAmelCase_ = self.get_generator(__a ) with torch.no_grad(): UpperCAmelCase_ = model(__a , generator=__a , sample_posterior=__a ).sample assert sample.shape == image.shape UpperCAmelCase_ = sample[-1, -2:, :2, -2:].flatten().float().cpu() UpperCAmelCase_ = torch.tensor(__a ) assert torch_all_close(__a , __a , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.16_09, 0.98_66, -0.04_87, -0.07_77, -0.27_16, 0.83_68, -0.20_55, -0.08_14], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]], [47, [-0.23_77, 0.11_47, 0.13_33, -0.48_41, -0.25_06, -0.08_05, -0.04_91, -0.40_85], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]], # fmt: on ] ) def _lowercase (self : str , __a : int , __a : Union[str, Any] , __a : List[Any] ): UpperCAmelCase_ = self.get_sd_vae_model() UpperCAmelCase_ = self.get_sd_image(__a ) with torch.no_grad(): UpperCAmelCase_ = model(__a ).sample assert sample.shape == image.shape UpperCAmelCase_ = sample[-1, -2:, -2:, :2].flatten().float().cpu() UpperCAmelCase_ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(__a , __a , atol=3E-3 ) @parameterized.expand( [ # fmt: off [13, [-0.20_51, -0.18_03, -0.23_11, -0.21_14, -0.32_92, -0.35_74, -0.29_53, -0.33_23]], [37, [-0.26_32, -0.26_25, -0.21_99, -0.27_41, -0.45_39, -0.49_90, -0.37_20, -0.49_25]], # fmt: on ] ) @require_torch_gpu def _lowercase (self : int , __a : int , __a : int ): UpperCAmelCase_ = self.get_sd_vae_model() UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) ) with torch.no_grad(): UpperCAmelCase_ = model.decode(__a ).sample assert list(sample.shape ) == [3, 3, 512, 512] UpperCAmelCase_ = sample[-1, -2:, :2, -2:].flatten().cpu() UpperCAmelCase_ = torch.tensor(__a ) assert torch_all_close(__a , __a , atol=1E-3 ) @parameterized.expand( [ # fmt: off [27, [-0.03_69, 0.02_07, -0.07_76, -0.06_82, -0.17_47, -0.19_30, -0.14_65, -0.20_39]], [16, [-0.16_28, -0.21_34, -0.27_47, -0.26_42, -0.37_74, -0.44_04, -0.36_87, -0.42_77]], # fmt: on ] ) @require_torch_gpu def _lowercase (self : Union[str, Any] , __a : List[str] , __a : Optional[Any] ): UpperCAmelCase_ = self.get_sd_vae_model(fpaa=__a ) UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) , fpaa=__a ) with torch.no_grad(): UpperCAmelCase_ = model.decode(__a ).sample assert list(sample.shape ) == [3, 3, 512, 512] UpperCAmelCase_ = sample[-1, -2:, :2, 
-2:].flatten().float().cpu() UpperCAmelCase_ = torch.tensor(__a ) assert torch_all_close(__a , __a , atol=5E-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def _lowercase (self : List[str] , __a : int ): UpperCAmelCase_ = self.get_sd_vae_model(fpaa=__a ) UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) , fpaa=__a ) with torch.no_grad(): UpperCAmelCase_ = model.decode(__a ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): UpperCAmelCase_ = model.decode(__a ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(__a , __a , atol=1E-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def _lowercase (self : Union[str, Any] , __a : Dict ): UpperCAmelCase_ = self.get_sd_vae_model() UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) ) with torch.no_grad(): UpperCAmelCase_ = model.decode(__a ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): UpperCAmelCase_ = model.decode(__a ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(__a , __a , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.30_01, 0.09_18, -2.69_84, -3.97_20, -3.20_99, -5.03_53, 1.73_38, -0.20_65, 3.42_67]], [47, [-1.50_30, -4.38_71, -6.03_55, -9.11_57, -1.66_61, -2.78_53, 2.16_07, -5.08_23, 2.56_33]], # fmt: on ] ) def _lowercase (self : Tuple , __a : List[Any] , __a : List[Any] ): UpperCAmelCase_ = self.get_sd_vae_model() UpperCAmelCase_ = self.get_sd_image(__a ) UpperCAmelCase_ = self.get_generator(__a ) with torch.no_grad(): UpperCAmelCase_ = model.encode(__a ).latent_dist UpperCAmelCase_ = dist.sample(generator=__a ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] UpperCAmelCase_ = sample[0, -1, -3:, -3:].flatten().cpu() UpperCAmelCase_ = torch.tensor(__a ) UpperCAmelCase_ = 3E-3 if torch_device != "mps" else 1E-2 assert torch_all_close(__a , __a , atol=__a )
78
1
'''simple docstring'''
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)

try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        ImageTextPipelineOutput,
        UniDiffuserPipeline,
    )
else:
    from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
    from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
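For reference, a minimal self-contained sketch of the optional-dependency guard used above; every name in it (torch as the guarded import, heavy_op as the guarded symbol) is an illustrative assumption, not part of the module:

# Illustrative sketch of the try/except import-guard pattern above;
# names are hypothetical (the real module guards transformers + torch).
try:
    import torch  # noqa: F401
    _torch_available = True
except ImportError:
    _torch_available = False

if _torch_available:
    def heavy_op(x):
        # the real implementation runs only when the optional dependency exists
        return torch.tensor(x) * 2
else:
    def heavy_op(x):
        # dummy stand-in: fail loudly at call time instead of import time
        raise ImportError("heavy_op requires `torch`: pip install torch")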
78
'''simple docstring'''
import logging

from transformers import PretrainedConfig

logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}

class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
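A usage sketch for the config class above, assuming it keeps `PretrainedConfig`'s standard JSON round-trip (the directory path is a placeholder):

config = BertAbsConfig(vocab_size=30522, max_pos=512)
config.save_pretrained("./bertabs-config")            # writes ./bertabs-config/config.json
reloaded = BertAbsConfig.from_pretrained("./bertabs-config")
assert reloaded.enc_layers == 6 and reloaded.model_type == "bertabs"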
78
1
'''simple docstring'''
from __future__ import annotations


def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    '''simple docstring'''
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
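A quick worked check of the mass-action law n * p = n_i**2 that the function above encodes (the concrete concentrations are illustrative):

# With hole_conc == 0 the function solves n * p = n_i**2 for the holes:
# n = 1.0e17, n_i = 1.5e10  ->  p = (1.5e10) ** 2 / 1.0e17 = 2250.0
label, value = carrier_concentration(1.0e17, 0, 1.5e10)
assert label == "hole_conc"
assert value == 2250.0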
78
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE_: Tuple =logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> int: '''simple docstring''' UpperCAmelCase_ = "huggingface/label-files" UpperCAmelCase_ = "imagenet-1k-id2label.json" UpperCAmelCase_ = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="dataset" ) , "r" ) ) UpperCAmelCase_ = {int(snake_case_ ): v for k, v in idalabel.items()} UpperCAmelCase_ = {v: k for k, v in idalabel.items()} UpperCAmelCase_ = "std_conv" if "bit" in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" UpperCAmelCase_ = BitConfig( conv_layer=snake_case_ , num_labels=10_00 , idalabel=snake_case_ , labelaid=snake_case_ , ) return config def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' if "stem.conv" in name: UpperCAmelCase_ = name.replace("stem.conv" , "bit.embedder.convolution" ) if "blocks" in name: UpperCAmelCase_ = name.replace("blocks" , "layers" ) if "head.fc" in name: UpperCAmelCase_ = name.replace("head.fc" , "classifier.1" ) if name.startswith("norm" ): UpperCAmelCase_ = "bit." + name if "bit" not in name and "classifier" not in name: UpperCAmelCase_ = "bit.encoder." 
+ name return name def lowerCAmelCase_ ( ) -> Dict: '''simple docstring''' UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : int=False ) -> List[Any]: '''simple docstring''' UpperCAmelCase_ = get_config(snake_case_ ) # load original model from timm UpperCAmelCase_ = create_model(snake_case_ , pretrained=snake_case_ ) timm_model.eval() # load state_dict of original model UpperCAmelCase_ = timm_model.state_dict() for key in state_dict.copy().keys(): UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val.squeeze() if "head" in key else val # load HuggingFace model UpperCAmelCase_ = BitForImageClassification(snake_case_ ) model.eval() model.load_state_dict(snake_case_ ) # create image processor UpperCAmelCase_ = create_transform(**resolve_data_config({} , model=snake_case_ ) ) UpperCAmelCase_ = transform.transforms UpperCAmelCase_ = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } UpperCAmelCase_ = BitImageProcessor( do_resize=snake_case_ , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=snake_case_ , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=snake_case_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) UpperCAmelCase_ = prepare_img() UpperCAmelCase_ = transform(snake_case_ ).unsqueeze(0 ) UpperCAmelCase_ = processor(snake_case_ , return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(snake_case_ , snake_case_ ) # verify logits with torch.no_grad(): UpperCAmelCase_ = model(snake_case_ ) UpperCAmelCase_ = outputs.logits print("Logits:" , logits[0, :3] ) print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] ) UpperCAmelCase_ = timm_model(snake_case_ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(snake_case_ , outputs.logits , atol=1E-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" ) model.save_pretrained(snake_case_ ) processor.save_pretrained(snake_case_ ) if push_to_hub: print(f"""Pushing model {model_name} and processor to the hub""" ) model.push_to_hub(f"""ybelkada/{model_name}""" ) processor.push_to_hub(f"""ybelkada/{model_name}""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_: int =argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='resnetv2_50x1_bitm', type=str, help='Name of the BiT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model to the hub.', ) SCREAMING_SNAKE_CASE_: Union[str, Any] =parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
78
1
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool SCREAMING_SNAKE_CASE_: Union[str, Any] ={ 'Acehnese Arabic': 'ace_Arab', 'Acehnese Latin': 'ace_Latn', 'Mesopotamian Arabic': 'acm_Arab', 'Ta\'izzi-Adeni Arabic': 'acq_Arab', 'Tunisian Arabic': 'aeb_Arab', 'Afrikaans': 'afr_Latn', 'South Levantine Arabic': 'ajp_Arab', 'Akan': 'aka_Latn', 'Amharic': 'amh_Ethi', 'North Levantine Arabic': 'apc_Arab', 'Modern Standard Arabic': 'arb_Arab', 'Modern Standard Arabic Romanized': 'arb_Latn', 'Najdi Arabic': 'ars_Arab', 'Moroccan Arabic': 'ary_Arab', 'Egyptian Arabic': 'arz_Arab', 'Assamese': 'asm_Beng', 'Asturian': 'ast_Latn', 'Awadhi': 'awa_Deva', 'Central Aymara': 'ayr_Latn', 'South Azerbaijani': 'azb_Arab', 'North Azerbaijani': 'azj_Latn', 'Bashkir': 'bak_Cyrl', 'Bambara': 'bam_Latn', 'Balinese': 'ban_Latn', 'Belarusian': 'bel_Cyrl', 'Bemba': 'bem_Latn', 'Bengali': 'ben_Beng', 'Bhojpuri': 'bho_Deva', 'Banjar Arabic': 'bjn_Arab', 'Banjar Latin': 'bjn_Latn', 'Standard Tibetan': 'bod_Tibt', 'Bosnian': 'bos_Latn', 'Buginese': 'bug_Latn', 'Bulgarian': 'bul_Cyrl', 'Catalan': 'cat_Latn', 'Cebuano': 'ceb_Latn', 'Czech': 'ces_Latn', 'Chokwe': 'cjk_Latn', 'Central Kurdish': 'ckb_Arab', 'Crimean Tatar': 'crh_Latn', 'Welsh': 'cym_Latn', 'Danish': 'dan_Latn', 'German': 'deu_Latn', 'Southwestern Dinka': 'dik_Latn', 'Dyula': 'dyu_Latn', 'Dzongkha': 'dzo_Tibt', 'Greek': 'ell_Grek', 'English': 'eng_Latn', 'Esperanto': 'epo_Latn', 'Estonian': 'est_Latn', 'Basque': 'eus_Latn', 'Ewe': 'ewe_Latn', 'Faroese': 'fao_Latn', 'Fijian': 'fij_Latn', 'Finnish': 'fin_Latn', 'Fon': 'fon_Latn', 'French': 'fra_Latn', 'Friulian': 'fur_Latn', 'Nigerian Fulfulde': 'fuv_Latn', 'Scottish Gaelic': 'gla_Latn', 'Irish': 'gle_Latn', 'Galician': 'glg_Latn', 'Guarani': 'grn_Latn', 'Gujarati': 'guj_Gujr', 'Haitian Creole': 'hat_Latn', 'Hausa': 'hau_Latn', 'Hebrew': 'heb_Hebr', 'Hindi': 'hin_Deva', 'Chhattisgarhi': 'hne_Deva', 'Croatian': 'hrv_Latn', 'Hungarian': 'hun_Latn', 'Armenian': 'hye_Armn', 'Igbo': 'ibo_Latn', 'Ilocano': 'ilo_Latn', 'Indonesian': 'ind_Latn', 'Icelandic': 'isl_Latn', 'Italian': 'ita_Latn', 'Javanese': 'jav_Latn', 'Japanese': 'jpn_Jpan', 'Kabyle': 'kab_Latn', 'Jingpho': 'kac_Latn', 'Kamba': 'kam_Latn', 'Kannada': 'kan_Knda', 'Kashmiri Arabic': 'kas_Arab', 'Kashmiri Devanagari': 'kas_Deva', 'Georgian': 'kat_Geor', 'Central Kanuri Arabic': 'knc_Arab', 'Central Kanuri Latin': 'knc_Latn', 'Kazakh': 'kaz_Cyrl', 'Kabiyè': 'kbp_Latn', 'Kabuverdianu': 'kea_Latn', 'Khmer': 'khm_Khmr', 'Kikuyu': 'kik_Latn', 'Kinyarwanda': 'kin_Latn', 'Kyrgyz': 'kir_Cyrl', 'Kimbundu': 'kmb_Latn', 'Northern Kurdish': 'kmr_Latn', 'Kikongo': 'kon_Latn', 'Korean': 'kor_Hang', 'Lao': 'lao_Laoo', 'Ligurian': 'lij_Latn', 'Limburgish': 'lim_Latn', 'Lingala': 'lin_Latn', 'Lithuanian': 'lit_Latn', 'Lombard': 'lmo_Latn', 'Latgalian': 'ltg_Latn', 'Luxembourgish': 'ltz_Latn', 'Luba-Kasai': 
'lua_Latn', 'Ganda': 'lug_Latn', 'Luo': 'luo_Latn', 'Mizo': 'lus_Latn', 'Standard Latvian': 'lvs_Latn', 'Magahi': 'mag_Deva', 'Maithili': 'mai_Deva', 'Malayalam': 'mal_Mlym', 'Marathi': 'mar_Deva', 'Minangkabau Arabic ': 'min_Arab', 'Minangkabau Latin': 'min_Latn', 'Macedonian': 'mkd_Cyrl', 'Plateau Malagasy': 'plt_Latn', 'Maltese': 'mlt_Latn', 'Meitei Bengali': 'mni_Beng', 'Halh Mongolian': 'khk_Cyrl', 'Mossi': 'mos_Latn', 'Maori': 'mri_Latn', 'Burmese': 'mya_Mymr', 'Dutch': 'nld_Latn', 'Norwegian Nynorsk': 'nno_Latn', 'Norwegian Bokmål': 'nob_Latn', 'Nepali': 'npi_Deva', 'Northern Sotho': 'nso_Latn', 'Nuer': 'nus_Latn', 'Nyanja': 'nya_Latn', 'Occitan': 'oci_Latn', 'West Central Oromo': 'gaz_Latn', 'Odia': 'ory_Orya', 'Pangasinan': 'pag_Latn', 'Eastern Panjabi': 'pan_Guru', 'Papiamento': 'pap_Latn', 'Western Persian': 'pes_Arab', 'Polish': 'pol_Latn', 'Portuguese': 'por_Latn', 'Dari': 'prs_Arab', 'Southern Pashto': 'pbt_Arab', 'Ayacucho Quechua': 'quy_Latn', 'Romanian': 'ron_Latn', 'Rundi': 'run_Latn', 'Russian': 'rus_Cyrl', 'Sango': 'sag_Latn', 'Sanskrit': 'san_Deva', 'Santali': 'sat_Olck', 'Sicilian': 'scn_Latn', 'Shan': 'shn_Mymr', 'Sinhala': 'sin_Sinh', 'Slovak': 'slk_Latn', 'Slovenian': 'slv_Latn', 'Samoan': 'smo_Latn', 'Shona': 'sna_Latn', 'Sindhi': 'snd_Arab', 'Somali': 'som_Latn', 'Southern Sotho': 'sot_Latn', 'Spanish': 'spa_Latn', 'Tosk Albanian': 'als_Latn', 'Sardinian': 'srd_Latn', 'Serbian': 'srp_Cyrl', 'Swati': 'ssw_Latn', 'Sundanese': 'sun_Latn', 'Swedish': 'swe_Latn', 'Swahili': 'swh_Latn', 'Silesian': 'szl_Latn', 'Tamil': 'tam_Taml', 'Tatar': 'tat_Cyrl', 'Telugu': 'tel_Telu', 'Tajik': 'tgk_Cyrl', 'Tagalog': 'tgl_Latn', 'Thai': 'tha_Thai', 'Tigrinya': 'tir_Ethi', 'Tamasheq Latin': 'taq_Latn', 'Tamasheq Tifinagh': 'taq_Tfng', 'Tok Pisin': 'tpi_Latn', 'Tswana': 'tsn_Latn', 'Tsonga': 'tso_Latn', 'Turkmen': 'tuk_Latn', 'Tumbuka': 'tum_Latn', 'Turkish': 'tur_Latn', 'Twi': 'twi_Latn', 'Central Atlas Tamazight': 'tzm_Tfng', 'Uyghur': 'uig_Arab', 'Ukrainian': 'ukr_Cyrl', 'Umbundu': 'umb_Latn', 'Urdu': 'urd_Arab', 'Northern Uzbek': 'uzn_Latn', 'Venetian': 'vec_Latn', 'Vietnamese': 'vie_Latn', 'Waray': 'war_Latn', 'Wolof': 'wol_Latn', 'Xhosa': 'xho_Latn', 'Eastern Yiddish': 'ydd_Hebr', 'Yoruba': 'yor_Latn', 'Yue Chinese': 'yue_Hant', 'Chinese Simplified': 'zho_Hans', 'Chinese Traditional': 'zho_Hant', 'Standard Malay': 'zsm_Latn', 'Zulu': 'zul_Latn', } class __A ( UpperCamelCase__ ): a__ : Any = """facebook/nllb-200-distilled-600M""" a__ : Union[str, Any] = ( """This is a tool that translates text from a language to another. It takes three inputs: `text`, which should """ """be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, """ """which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in """ """plain English, such as 'Romanian', or 'Albanian'. 
It returns the text translated in `tgt_lang`.""" ) a__ : int = """translator""" a__ : Optional[Any] = AutoTokenizer a__ : Optional[Any] = AutoModelForSeqaSeqLM a__ : Dict = LANGUAGE_CODES a__ : Union[str, Any] = ["""text""", """text""", """text"""] a__ : Union[str, Any] = ["""text"""] def _lowercase (self : int , __a : int , __a : Dict , __a : int ): if src_lang not in self.lang_to_code: raise ValueError(f"""{src_lang} is not a supported language.""" ) if tgt_lang not in self.lang_to_code: raise ValueError(f"""{tgt_lang} is not a supported language.""" ) UpperCAmelCase_ = self.lang_to_code[src_lang] UpperCAmelCase_ = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( __a , return_tensors="pt" , src_lang=__a , tgt_lang=__a ) def _lowercase (self : Optional[Any] , __a : List[str] ): return self.model.generate(**__a ) def _lowercase (self : Any , __a : Tuple ): return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=__a )
78
'''simple docstring''' import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class __A ( unittest.TestCase ): def _lowercase (self : List[str] ): UpperCAmelCase_ = 0 def _lowercase (self : Tuple ): UpperCAmelCase_ = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" ) self.assertIsInstance(__a , __a ) def _lowercase (self : str ): with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json" UpperCAmelCase_ = Path(__a ) / "config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , ) json.dump({"model_type": "clip"} , open(__a , "w" ) ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def _lowercase (self : Dict ): # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json" UpperCAmelCase_ = Path(__a ) / "config.json" json.dump( {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , ) json.dump({"model_type": "clip"} , open(__a , "w" ) ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def _lowercase (self : List[str] ): with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = CLIPConfig() # Create a dummy config file with image_proceesor_type UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json" UpperCAmelCase_ = Path(__a ) / "config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , ) json.dump({"model_type": "clip"} , open(__a , "w" ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ).to_dict() config_dict.pop("image_processor_type" ) UpperCAmelCase_ = CLIPImageProcessor(**__a ) # save in new folder model_config.save_pretrained(__a ) config.save_pretrained(__a ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ) # make sure private variable is not incorrectly saved UpperCAmelCase_ = json.loads(config.to_json_string() ) self.assertTrue("_processor_class" not in dict_as_saved ) self.assertIsInstance(__a , __a ) def _lowercase (self : int ): with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def _lowercase (self : Tuple ): with self.assertRaisesRegex( __a , "clip-base is not a local folder and is not a valid model identifier" ): UpperCAmelCase_ = AutoImageProcessor.from_pretrained("clip-base" ) def _lowercase (self : Optional[int] ): with self.assertRaisesRegex( __a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): UpperCAmelCase_ = 
AutoImageProcessor.from_pretrained(__a , revision="aaaaaa" ) def _lowercase (self : Union[str, Any] ): with self.assertRaisesRegex( __a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ): UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" ) def _lowercase (self : List[Any] ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__a ): UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" ) # If remote code is disabled, we can't load this config. with self.assertRaises(__a ): UpperCAmelCase_ = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) # Test image processor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__a ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a , trust_remote_code=__a ) self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" ) def _lowercase (self : Optional[int] ): try: AutoConfig.register("custom" , __a ) AutoImageProcessor.register(__a , __a ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__a ): AutoImageProcessor.register(__a , __a ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json" UpperCAmelCase_ = Path(__a ) / "config.json" json.dump( {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , ) json.dump({"model_type": "clip"} , open(__a , "w" ) ) UpperCAmelCase_ = CustomImageProcessor.from_pretrained(__a ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__a ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def _lowercase (self : Optional[int] ): class __A ( UpperCamelCase__ ): a__ : str = True try: AutoConfig.register("custom" , __a ) AutoImageProcessor.register(__a , __a ) # If remote code is not set, the default is to use local UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. 
UpperCAmelCase_ = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub UpperCAmelCase_ = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) self.assertTrue(not hasattr(__a , "is_local" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
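A condensed sketch of the three loading paths the tests above exercise: a Hub checkpoint, a local directory, and a custom processor via remote code (the directory name is a placeholder):

from transformers import AutoImageProcessor

# 1) resolve by model id on the Hub
processor = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
# 2) reload from a local folder containing preprocessor_config.json
processor.save_pretrained("./proc")
local_processor = AutoImageProcessor.from_pretrained("./proc")
# 3) custom image processors require explicitly opting in to remote code
custom_processor = AutoImageProcessor.from_pretrained(
    "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
)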
78
1
'''simple docstring'''


def sum_digits(num: int) -> int:
    '''simple docstring'''
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    '''simple docstring'''
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{solution() = }")
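Two sanity checks for the continued-fraction recurrence above (Project Euler 65: e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...], so the 10th convergent is 1457/536):

assert solution(10) == 17    # numerator 1457 -> 1 + 4 + 5 + 7 = 17
assert solution(100) == 272  # the commonly published Project Euler 65 answer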
78
'''simple docstring''' import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_: Tuple =False, False, False @dataclass class __A : a__ : Optional[int] = None a__ : bool = True a__ : bool = True a__ : Optional[str] = None # Automatically constructed a__ : ClassVar[str] = "dict" a__ : ClassVar[Any] = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} ) a__ : str = field(default="""Audio""" , init=UpperCamelCase__ , repr=UpperCamelCase__ ) def __call__(self : Optional[Any] ): return self.pa_type def _lowercase (self : str , __a : Union[str, bytes, dict] ): try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err if isinstance(__a , __a ): return {"bytes": None, "path": value} elif isinstance(__a , __a ): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes UpperCAmelCase_ = BytesIO() sf.write(__a , value["array"] , value["sampling_rate"] , format="wav" ) return {"bytes": buffer.getvalue(), "path": None} elif value.get("path" ) is not None and os.path.isfile(value["path"] ): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith("pcm" ): # "PCM" only has raw audio bytes if value.get("sampling_rate" ) is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" ) if value.get("bytes" ): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) UpperCAmelCase_ = np.frombuffer(value["bytes"] , dtype=np.intaa ).astype(np.floataa ) / 32767 else: UpperCAmelCase_ = np.memmap(value["path"] , dtype="h" , mode="r" ).astype(np.floataa ) / 32767 UpperCAmelCase_ = BytesIO(bytes() ) sf.write(__a , __a , value["sampling_rate"] , format="wav" ) return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get("path" )} elif value.get("bytes" ) is not None or value.get("path" ) is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get("bytes" ), "path": value.get("path" )} else: raise ValueError( f"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" ) def _lowercase (self : Dict , __a : dict , __a : Optional[Dict[str, Union[str, bool, None]]] = None ): if not self.decode: raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." ) UpperCAmelCase_ , UpperCAmelCase_ = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None) if path is None and file is None: raise ValueError(f"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" ) try: import librosa import soundfile as sf except ImportError as err: raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." 
) from err UpperCAmelCase_ = xsplitext(__a )[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, " "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, " "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " ) if file is None: UpperCAmelCase_ = token_per_repo_id or {} UpperCAmelCase_ = path.split("::" )[-1] try: UpperCAmelCase_ = string_to_dict(__a , config.HUB_DATASETS_URL )["repo_id"] UpperCAmelCase_ = token_per_repo_id[repo_id] except (ValueError, KeyError): UpperCAmelCase_ = None with xopen(__a , "rb" , use_auth_token=__a ) as f: UpperCAmelCase_ , UpperCAmelCase_ = sf.read(__a ) else: UpperCAmelCase_ , UpperCAmelCase_ = sf.read(__a ) UpperCAmelCase_ = array.T if self.mono: UpperCAmelCase_ = librosa.to_mono(__a ) if self.sampling_rate and self.sampling_rate != sampling_rate: UpperCAmelCase_ = librosa.resample(__a , orig_sr=__a , target_sr=self.sampling_rate ) UpperCAmelCase_ = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def _lowercase (self : Dict ): from .features import Value if self.decode: raise ValueError("Cannot flatten a decoded Audio feature." ) return { "bytes": Value("binary" ), "path": Value("string" ), } def _lowercase (self : Optional[Any] , __a : Union[pa.StringArray, pa.StructArray] ): if pa.types.is_string(storage.type ): UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.binary() ) UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.string() ) UpperCAmelCase_ = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ): UpperCAmelCase_ = pa.array([Audio().encode_example(__a ) if x is not None else None for x in storage.to_pylist()] ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("bytes" ) >= 0: UpperCAmelCase_ = storage.field("bytes" ) else: UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.binary() ) if storage.type.get_field_index("path" ) >= 0: UpperCAmelCase_ = storage.field("path" ) else: UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.string() ) UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() ) return array_cast(__a , self.pa_type ) def _lowercase (self : Dict , __a : pa.StructArray ): @no_op_if_value_is_null def path_to_bytes(__a : Tuple ): with xopen(__a , "rb" ) as f: UpperCAmelCase_ = f.read() return bytes_ UpperCAmelCase_ = pa.array( [ (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) UpperCAmelCase_ = pa.array( [os.path.basename(__a ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , ) UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() ) return array_cast(__a , self.pa_type )
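A brief sketch of how the feature above is normally attached to a 🤗 Datasets column; the wav path is a placeholder for a real file on disk:

from datasets import Audio, Dataset

ds = Dataset.from_dict({"audio": ["sample.wav"]})
ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
example = ds[0]["audio"]  # decoding yields {"path", "array", "sampling_rate"}
print(example["sampling_rate"])  # 16000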
78
1
'''simple docstring''' import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class __A ( UpperCamelCase__ ): a__ : List[Any] = (DDIMParallelScheduler,) a__ : str = (("""eta""", 0.0), ("""num_inference_steps""", 50)) def _lowercase (self : List[str] , **__a : Tuple ): UpperCAmelCase_ = { "num_train_timesteps": 1000, "beta_start": 0.00_01, "beta_end": 0.02, "beta_schedule": "linear", "clip_sample": True, } config.update(**__a ) return config def _lowercase (self : List[str] , **__a : Union[str, Any] ): UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ = self.get_scheduler_config(**__a ) UpperCAmelCase_ = scheduler_class(**__a ) UpperCAmelCase_ , UpperCAmelCase_ = 10, 0.0 UpperCAmelCase_ = self.dummy_model() UpperCAmelCase_ = self.dummy_sample_deter scheduler.set_timesteps(__a ) for t in scheduler.timesteps: UpperCAmelCase_ = model(__a , __a ) UpperCAmelCase_ = scheduler.step(__a , __a , __a , __a ).prev_sample return sample def _lowercase (self : Union[str, Any] ): for timesteps in [100, 500, 1000]: self.check_over_configs(num_train_timesteps=__a ) def _lowercase (self : str ): for steps_offset in [0, 1]: self.check_over_configs(steps_offset=__a ) UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ = self.get_scheduler_config(steps_offset=1 ) UpperCAmelCase_ = scheduler_class(**__a ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) ) def _lowercase (self : Optional[int] ): for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=__a , beta_end=__a ) def _lowercase (self : Any ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__a ) def _lowercase (self : Tuple ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__a ) def _lowercase (self : Optional[Any] ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=__a ) def _lowercase (self : Any ): for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=__a ) def _lowercase (self : List[str] ): for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=__a ) def _lowercase (self : int ): self.check_over_configs(thresholding=__a ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=__a , prediction_type=__a , sample_max_value=__a , ) def _lowercase (self : Tuple ): for t in [1, 10, 49]: self.check_over_forward(time_step=__a ) def _lowercase (self : int ): for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ): self.check_over_forward(time_step=__a , num_inference_steps=__a ) def _lowercase (self : Dict ): for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ): self.check_over_forward(time_step=__a , eta=__a ) def _lowercase (self : Dict ): UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ = self.get_scheduler_config() UpperCAmelCase_ = scheduler_class(**__a ) assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_47_71 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_24_60 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_09_79 ) ) < 1E-5 assert 
torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5 def _lowercase (self : List[str] ): UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ = self.get_scheduler_config() UpperCAmelCase_ = scheduler_class(**__a ) UpperCAmelCase_ , UpperCAmelCase_ = 10, 0.0 scheduler.set_timesteps(__a ) UpperCAmelCase_ = self.dummy_model() UpperCAmelCase_ = self.dummy_sample_deter UpperCAmelCase_ = self.dummy_sample_deter + 0.1 UpperCAmelCase_ = self.dummy_sample_deter - 0.1 UpperCAmelCase_ = samplea.shape[0] UpperCAmelCase_ = torch.stack([samplea, samplea, samplea] , dim=0 ) UpperCAmelCase_ = torch.arange(__a )[0:3, None].repeat(1 , __a ) UpperCAmelCase_ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) UpperCAmelCase_ = scheduler.batch_step_no_noise(__a , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , __a ) UpperCAmelCase_ = torch.sum(torch.abs(__a ) ) UpperCAmelCase_ = torch.mean(torch.abs(__a ) ) assert abs(result_sum.item() - 11_47.79_04 ) < 1E-2 assert abs(result_mean.item() - 0.49_82 ) < 1E-3 def _lowercase (self : Dict ): UpperCAmelCase_ = self.full_loop() UpperCAmelCase_ = torch.sum(torch.abs(__a ) ) UpperCAmelCase_ = torch.mean(torch.abs(__a ) ) assert abs(result_sum.item() - 1_72.00_67 ) < 1E-2 assert abs(result_mean.item() - 0.22_39_67 ) < 1E-3 def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ = self.full_loop(prediction_type="v_prediction" ) UpperCAmelCase_ = torch.sum(torch.abs(__a ) ) UpperCAmelCase_ = torch.mean(torch.abs(__a ) ) assert abs(result_sum.item() - 52.53_02 ) < 1E-2 assert abs(result_mean.item() - 0.06_84 ) < 1E-3 def _lowercase (self : Any ): # We specify different beta, so that the first alpha is 0.99 UpperCAmelCase_ = self.full_loop(set_alpha_to_one=__a , beta_start=0.01 ) UpperCAmelCase_ = torch.sum(torch.abs(__a ) ) UpperCAmelCase_ = torch.mean(torch.abs(__a ) ) assert abs(result_sum.item() - 1_49.82_95 ) < 1E-2 assert abs(result_mean.item() - 0.19_51 ) < 1E-3 def _lowercase (self : str ): # We specify different beta, so that the first alpha is 0.99 UpperCAmelCase_ = self.full_loop(set_alpha_to_one=__a , beta_start=0.01 ) UpperCAmelCase_ = torch.sum(torch.abs(__a ) ) UpperCAmelCase_ = torch.mean(torch.abs(__a ) ) assert abs(result_sum.item() - 1_49.07_84 ) < 1E-2 assert abs(result_mean.item() - 0.19_41 ) < 1E-3
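For orientation, a minimal sketch of the set_timesteps/step loop the tests above exercise; the random tensor stands in for a real UNet's noise prediction:

import torch
from diffusers import DDIMParallelScheduler

scheduler = DDIMParallelScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    noise_pred = torch.randn_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(noise_pred, t, sample, eta=0.0).prev_sample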
78
'''simple docstring''' import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Optional[int] ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg" UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ).convert("RGB" ) UpperCAmelCase_ = transforms.Compose( [ transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ), ] ) UpperCAmelCase_ = transform(snake_case_ ).unsqueeze(0 ).to(snake_case_ ) return image def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' if "visual_encoder" in key: UpperCAmelCase_ = re.sub("visual_encoder*" , "vision_model.encoder" , snake_case_ ) if "blocks" in key: UpperCAmelCase_ = re.sub(R"blocks" , "layers" , snake_case_ ) if "attn" in key: UpperCAmelCase_ = re.sub(R"attn" , "self_attn" , snake_case_ ) if "norm1" in key: UpperCAmelCase_ = re.sub(R"norm1" , "layer_norm1" , snake_case_ ) if "norm2" in key: UpperCAmelCase_ = re.sub(R"norm2" , "layer_norm2" , snake_case_ ) if "encoder.norm" in key: UpperCAmelCase_ = re.sub(R"encoder.norm" , "post_layernorm" , snake_case_ ) if "encoder.patch_embed.proj" in key: UpperCAmelCase_ = re.sub(R"encoder.patch_embed.proj" , "embeddings.patch_embedding" , snake_case_ ) if "encoder.pos_embed" in key: UpperCAmelCase_ = re.sub(R"encoder.pos_embed" , "embeddings.position_embedding" , snake_case_ ) if "encoder.cls_token" in key: UpperCAmelCase_ = re.sub(R"encoder.cls_token" , "embeddings.class_embedding" , snake_case_ ) if "self_attn" in key: UpperCAmelCase_ = re.sub(R"self_attn.proj" , "self_attn.projection" , snake_case_ ) return key @torch.no_grad() def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : Any=None ) -> Union[str, Any]: '''simple docstring''' if config_path is not None: UpperCAmelCase_ = BlipConfig.from_pretrained(snake_case_ ) else: UpperCAmelCase_ = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} ) UpperCAmelCase_ = BlipForConditionalGeneration(snake_case_ ).eval() UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth" UpperCAmelCase_ = blip_decoder(pretrained=snake_case_ , image_size=3_84 , vit="base" ) UpperCAmelCase_ = pt_model.eval() UpperCAmelCase_ = pt_model.state_dict() for key in modified_state_dict.copy(): UpperCAmelCase_ = modified_state_dict.pop(snake_case_ ) UpperCAmelCase_ = rename_key(snake_case_ ) UpperCAmelCase_ = value hf_model.load_state_dict(snake_case_ ) UpperCAmelCase_ = 3_84 UpperCAmelCase_ = load_demo_image(image_size=snake_case_ , device="cpu" ) UpperCAmelCase_ = BertTokenizer.from_pretrained("bert-base-uncased" ) UpperCAmelCase_ = tokenizer(["a picture of"] ).input_ids UpperCAmelCase_ = hf_model.generate(snake_case_ , snake_case_ ) assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 
20_14, 38_99, 1_02] UpperCAmelCase_ = hf_model.generate(snake_case_ ) assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02] if pytorch_dump_folder_path is not None: hf_model.save_pretrained(snake_case_ ) # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth' UpperCAmelCase_ = ( "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth" ) UpperCAmelCase_ = blip_vqa(pretrained=snake_case_ , image_size=snake_case_ , vit="base" ) vqa_model.eval() UpperCAmelCase_ = vqa_model.state_dict() for key in modified_state_dict.copy(): UpperCAmelCase_ = modified_state_dict.pop(snake_case_ ) UpperCAmelCase_ = rename_key(snake_case_ ) UpperCAmelCase_ = value UpperCAmelCase_ = BlipForQuestionAnswering(snake_case_ ) hf_vqa_model.load_state_dict(snake_case_ ) UpperCAmelCase_ = ["How many dogs are in this image?"] UpperCAmelCase_ = tokenizer(snake_case_ , return_tensors="pt" ).input_ids UpperCAmelCase_ = hf_vqa_model.generate(snake_case_ , snake_case_ ) print(tokenizer.decode(answer[0] ) ) assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]" if pytorch_dump_folder_path is not None: hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa" ) UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth" UpperCAmelCase_ = blip_itm(pretrained=snake_case_ , image_size=snake_case_ , vit="base" ) itm_model.eval() UpperCAmelCase_ = itm_model.state_dict() for key in modified_state_dict.copy(): UpperCAmelCase_ = modified_state_dict.pop(snake_case_ ) UpperCAmelCase_ = rename_key(snake_case_ ) UpperCAmelCase_ = value UpperCAmelCase_ = BlipForImageTextRetrieval(snake_case_ ) UpperCAmelCase_ = ["A picture of a woman with a dog sitting in a beach"] UpperCAmelCase_ = tokenizer( snake_case_ , return_tensors="pt" , padding="max_length" , truncation=snake_case_ , max_length=35 , ).input_ids hf_itm_model.load_state_dict(snake_case_ ) hf_itm_model.eval() UpperCAmelCase_ = hf_itm_model(snake_case_ , snake_case_ , use_itm_head=snake_case_ ) UpperCAmelCase_ = hf_itm_model(snake_case_ , snake_case_ , use_itm_head=snake_case_ ) assert out[0].item() == 0.2110_6874_9427_7954 assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127 if pytorch_dump_folder_path is not None: hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_: Optional[Any] =argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') SCREAMING_SNAKE_CASE_: int =parser.parse_args() convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
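Once converted, a checkpoint like the ones produced above is driven through the standard BLIP classes; a sketch using a public captioning checkpoint as a stand-in:

import requests
from PIL import Image
from transformers import BlipForConditionalGeneration, BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
inputs = processor(image, "a picture of", return_tensors="pt")
out = model.generate(**inputs)
print(processor.decode(out[0], skip_special_tokens=True))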
78
1
'''simple docstring''' import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) SCREAMING_SNAKE_CASE_: int =logging.getLogger() def lowerCAmelCase_ ( ) -> Dict: '''simple docstring''' UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument("-f" ) UpperCAmelCase_ = parser.parse_args() return args.f def lowerCAmelCase_ ( snake_case_ : List[Any] ) -> str: '''simple docstring''' UpperCAmelCase_ = {} UpperCAmelCase_ = os.path.join(snake_case_ , "all_results.json" ) if os.path.exists(snake_case_ ): with open(snake_case_ , "r" ) as f: UpperCAmelCase_ = json.load(snake_case_ ) else: raise ValueError(f"""can't find {path}""" ) return results def lowerCAmelCase_ ( ) -> Dict: '''simple docstring''' UpperCAmelCase_ = torch.cuda.is_available() and torch_device == "cuda" return is_using_cuda and is_apex_available() SCREAMING_SNAKE_CASE_: Any =logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class __A ( UpperCamelCase__ ): @classmethod def _lowercase (cls : Any ): # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU UpperCAmelCase_ = tempfile.mkdtemp() UpperCAmelCase_ = os.path.join(cls.tmpdir , "default_config.yml" ) write_basic_config(save_location=cls.configPath ) UpperCAmelCase_ = ["accelerate", "launch", "--config_file", cls.configPath] @classmethod def _lowercase (cls : int ): shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --checkpointing_steps epoch --with_tracking """.split() if is_cuda_and_apex_available(): testargs.append("--fp16" ) run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "glue_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking """.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertLess(result["perplexity"] , 100 ) self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "clm_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertLess(result["perplexity"] , 42 ) self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "mlm_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : Optional[Any] ): # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu UpperCAmelCase_ = 7 if get_gpu_count() > 1 else 2 UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) self.assertLess(result["train_loss"] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "ner_no_trainer" ) ) ) @unittest.skip(reason="Fix me @muellerzr" ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : int ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. 
self.assertGreaterEqual(result["eval_f1"] , 28 ) self.assertGreaterEqual(result["eval_exact"] , 28 ) self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "qa_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : str ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertGreaterEqual(result["eval_accuracy"] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(__a , "swag_no_trainer" ) ) ) @slow @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : Optional[int] ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertGreaterEqual(result["eval_rouge1"] , 10 ) self.assertGreaterEqual(result["eval_rouge2"] , 2 ) self.assertGreaterEqual(result["eval_rougeL"] , 7 ) self.assertGreaterEqual(result["eval_rougeLsum"] , 7 ) self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "summarization_no_trainer" ) ) ) @slow @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : List[str] ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertGreaterEqual(result["eval_bleu"] , 30 ) self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "translation_no_trainer" ) ) ) @slow def _lowercase (self : Dict ): UpperCAmelCase_ = logging.StreamHandler(sys.stdout ) logger.addHandler(__a ) UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 
--checkpointing_steps epoch """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : Any ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 """.split() if is_cuda_and_apex_available(): testargs.append("--fp16" ) run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) # The base model scores a 25% self.assertGreaterEqual(result["eval_accuracy"] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(__a , "step_1" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "image_classification_no_trainer" ) ) )
78
'''simple docstring''' import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Union[str, Any]=0.999 , snake_case_ : Tuple="cosine" , ) -> Optional[Any]: '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(snake_case_ : Optional[int] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(snake_case_ : Optional[Any] ): return math.exp(t * -12.0 ) else: raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) UpperCAmelCase_ = [] for i in range(snake_case_ ): UpperCAmelCase_ = i / num_diffusion_timesteps UpperCAmelCase_ = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(snake_case_ ) / alpha_bar_fn(snake_case_ ) , snake_case_ ) ) return torch.tensor(snake_case_ , dtype=torch.floataa ) class __A ( UpperCamelCase__ , UpperCamelCase__ ): a__ : Tuple = [e.name for e in KarrasDiffusionSchedulers] a__ : Optional[Any] = 2 @register_to_config def __init__(self : Union[str, Any] , __a : int = 1000 , __a : float = 0.0_00_85 , __a : float = 0.0_12 , __a : str = "linear" , __a : Optional[Union[np.ndarray, List[float]]] = None , __a : str = "epsilon" , __a : Optional[bool] = False , __a : Optional[bool] = False , __a : float = 1.0 , __a : str = "linspace" , __a : int = 0 , ): if trained_betas is not None: UpperCAmelCase_ = torch.tensor(__a , dtype=torch.floataa ) elif beta_schedule == "linear": UpperCAmelCase_ = torch.linspace(__a , __a , __a , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. UpperCAmelCase_ = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , __a , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule UpperCAmelCase_ = betas_for_alpha_bar(__a , alpha_transform_type="cosine" ) elif beta_schedule == "exp": UpperCAmelCase_ = betas_for_alpha_bar(__a , alpha_transform_type="exp" ) else: raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" ) UpperCAmelCase_ = 1.0 - self.betas UpperCAmelCase_ = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(__a , __a , __a ) UpperCAmelCase_ = use_karras_sigmas def _lowercase (self : Optional[Any] , __a : Union[str, Any] , __a : Tuple=None ): if schedule_timesteps is None: UpperCAmelCase_ = self.timesteps UpperCAmelCase_ = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: UpperCAmelCase_ = 1 if len(__a ) > 1 else 0 else: UpperCAmelCase_ = timestep.cpu().item() if torch.is_tensor(__a ) else timestep UpperCAmelCase_ = self._index_counter[timestep_int] return indices[pos].item() @property def _lowercase (self : List[Any] ): # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def _lowercase (self : Optional[Any] , __a : torch.FloatTensor , __a : Union[float, torch.FloatTensor] , ): UpperCAmelCase_ = self.index_for_timestep(__a ) UpperCAmelCase_ = self.sigmas[step_index] UpperCAmelCase_ = sample / ((sigma**2 + 1) ** 0.5) return sample def _lowercase (self : Any , __a : int , __a : Union[str, torch.device] = None , __a : Optional[int] = None , ): UpperCAmelCase_ = num_inference_steps UpperCAmelCase_ = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": UpperCAmelCase_ = np.linspace(0 , num_train_timesteps - 1 , __a , dtype=__a )[::-1].copy() elif self.config.timestep_spacing == "leading": UpperCAmelCase_ = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 UpperCAmelCase_ = (np.arange(0 , __a ) * step_ratio).round()[::-1].copy().astype(__a ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": UpperCAmelCase_ = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 UpperCAmelCase_ = (np.arange(__a , 0 , -step_ratio )).round().copy().astype(__a ) timesteps -= 1 else: raise ValueError( f"""{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" ) UpperCAmelCase_ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) UpperCAmelCase_ = np.log(__a ) UpperCAmelCase_ = np.interp(__a , np.arange(0 , len(__a ) ) , __a ) if self.config.use_karras_sigmas: UpperCAmelCase_ = self._convert_to_karras(in_sigmas=__a , num_inference_steps=self.num_inference_steps ) UpperCAmelCase_ = np.array([self._sigma_to_t(__a , __a ) for sigma in sigmas] ) UpperCAmelCase_ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) UpperCAmelCase_ = torch.from_numpy(__a ).to(device=__a ) UpperCAmelCase_ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] ) UpperCAmelCase_ = torch.from_numpy(__a ) UpperCAmelCase_ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] ) if str(__a ).startswith("mps" ): # mps does not support float64 UpperCAmelCase_ = timesteps.to(__a , dtype=torch.floataa ) else: UpperCAmelCase_ = timesteps.to(device=__a ) # empty dt and derivative UpperCAmelCase_ = None UpperCAmelCase_ = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter UpperCAmelCase_ = defaultdict(__a ) def _lowercase (self : int , __a : Optional[Any] , __a : List[str] ): # get log sigma UpperCAmelCase_ = np.log(__a ) # get distribution UpperCAmelCase_ = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range UpperCAmelCase_ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 ) UpperCAmelCase_ = low_idx + 1 UpperCAmelCase_ = log_sigmas[low_idx] UpperCAmelCase_ = log_sigmas[high_idx] # interpolate sigmas UpperCAmelCase_ = (low - log_sigma) / (low - high) UpperCAmelCase_ = np.clip(__a , 0 , 1 ) # transform interpolation to time range UpperCAmelCase_ = (1 - w) * low_idx + w * high_idx UpperCAmelCase_ = t.reshape(sigma.shape ) return t def _lowercase (self : Dict , __a : torch.FloatTensor , __a : Optional[int] ): UpperCAmelCase_ = in_sigmas[-1].item() UpperCAmelCase_ = in_sigmas[0].item() UpperCAmelCase_ = 7.0 # 7.0 is the value used in the paper UpperCAmelCase_ = np.linspace(0 , 1 , __a ) UpperCAmelCase_ = sigma_min ** (1 / rho) UpperCAmelCase_ = sigma_max ** (1 / rho) UpperCAmelCase_ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def _lowercase (self : List[str] ): return self.dt is None def _lowercase (self : List[Any] , __a : Union[torch.FloatTensor, np.ndarray] , __a : Union[float, torch.FloatTensor] , __a : Union[torch.FloatTensor, np.ndarray] , __a : bool = True , ): UpperCAmelCase_ = self.index_for_timestep(__a ) # advance index counter by 1 UpperCAmelCase_ = timestep.cpu().item() if torch.is_tensor(__a ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: UpperCAmelCase_ = self.sigmas[step_index] UpperCAmelCase_ = self.sigmas[step_index + 1] else: # 2nd order / Heun's method UpperCAmelCase_ = self.sigmas[step_index - 1] UpperCAmelCase_ = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API UpperCAmelCase_ = 0 UpperCAmelCase_ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": UpperCAmelCase_ = sigma_hat if self.state_in_first_order else sigma_next UpperCAmelCase_ = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": UpperCAmelCase_ = sigma_hat if self.state_in_first_order else sigma_next UpperCAmelCase_ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": UpperCAmelCase_ = model_output else: raise ValueError( f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" ) if self.config.clip_sample: UpperCAmelCase_ = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order UpperCAmelCase_ = (sample - pred_original_sample) / sigma_hat # 3. delta timestep UpperCAmelCase_ = sigma_next - sigma_hat # store for 2nd order step UpperCAmelCase_ = derivative UpperCAmelCase_ = dt UpperCAmelCase_ = sample else: # 2. 2nd order / Heun's method UpperCAmelCase_ = (sample - pred_original_sample) / sigma_next UpperCAmelCase_ = (self.prev_derivative + derivative) / 2 # 3. take prev timestep & sample UpperCAmelCase_ = self.dt UpperCAmelCase_ = self.sample # free dt and derivative # Note, this puts the scheduler in "first order mode" UpperCAmelCase_ = None UpperCAmelCase_ = None UpperCAmelCase_ = None UpperCAmelCase_ = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=__a ) def _lowercase (self : Any , __a : torch.FloatTensor , __a : torch.FloatTensor , __a : torch.FloatTensor , ): # Make sure sigmas and timesteps have the same device and dtype as original_samples UpperCAmelCase_ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(__a ): # mps does not support float64 UpperCAmelCase_ = self.timesteps.to(original_samples.device , dtype=torch.floataa ) UpperCAmelCase_ = timesteps.to(original_samples.device , dtype=torch.floataa ) else: UpperCAmelCase_ = self.timesteps.to(original_samples.device ) UpperCAmelCase_ = timesteps.to(original_samples.device ) UpperCAmelCase_ = [self.index_for_timestep(__a , __a ) for t in timesteps] UpperCAmelCase_ = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): UpperCAmelCase_ = sigma.unsqueeze(-1 ) UpperCAmelCase_ = original_samples + noise * sigma return noisy_samples def __len__(self : str ): return self.config.num_train_timesteps
78
1
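The second-order branch of step() in the scheduler above averages the freshly computed derivative with the stored one ((self.prev_derivative + derivative) / 2), which is exactly Heun's method for integrating an ODE. A minimal, self-contained sketch of that update on a generic dy/dt = f(t, y), not the scheduler's sigma-space formulation:

def heun_step(f, t, y, dt):
    d1 = f(t, y)                   # slope at the current point
    y_euler = y + d1 * dt          # first-order (Euler) proposal
    d2 = f(t + dt, y_euler)        # slope re-evaluated at the proposal
    return y + (d1 + d2) / 2 * dt  # corrected step using the averaged slope

# One step of dy/dt = -y from y(0) = 1; exact solution exp(-0.1) is ~0.90484.
print(heun_step(lambda t, y: -y, 0.0, 1.0, 0.1))  # ~0.905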
'''simple docstring''' import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler SCREAMING_SNAKE_CASE_: Tuple =16 SCREAMING_SNAKE_CASE_: Tuple =32 def lowerCAmelCase_ ( snake_case_ : Accelerator , snake_case_ : int = 16 , snake_case_ : str = "bert-base-cased" ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ = AutoTokenizer.from_pretrained(snake_case_ ) UpperCAmelCase_ = load_dataset("glue" , "mrpc" ) def tokenize_function(snake_case_ : Optional[Any] ): # max_length=None => use the model max length (it's actually the default) UpperCAmelCase_ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=snake_case_ , max_length=snake_case_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset UpperCAmelCase_ = datasets.map( snake_case_ , batched=snake_case_ , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=snake_case_ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCAmelCase_ = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(snake_case_ : Union[str, Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(snake_case_ , padding="max_length" , max_length=1_28 , return_tensors="pt" ) return tokenizer.pad(snake_case_ , padding="longest" , return_tensors="pt" ) # Instantiate dataloaders. 
UpperCAmelCase_ = DataLoader( tokenized_datasets["train"] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ ) UpperCAmelCase_ = DataLoader( tokenized_datasets["validation"] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ ) return train_dataloader, eval_dataloader def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : str ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase_ = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCAmelCase_ = config["lr"] UpperCAmelCase_ = int(config["num_epochs"] ) UpperCAmelCase_ = int(config["seed"] ) UpperCAmelCase_ = int(config["batch_size"] ) UpperCAmelCase_ = args.model_name_or_path set_seed(snake_case_ ) UpperCAmelCase_ , UpperCAmelCase_ = get_dataloaders(snake_case_ , snake_case_ , snake_case_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCAmelCase_ = AutoModelForSequenceClassification.from_pretrained(snake_case_ , return_dict=snake_case_ ) # Instantiate optimizer UpperCAmelCase_ = ( AdamW if accelerator.state.deepspeed_plugin is None or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) UpperCAmelCase_ = optimizer_cls(params=model.parameters() , lr=snake_case_ ) if accelerator.state.deepspeed_plugin is not None: UpperCAmelCase_ = accelerator.state.deepspeed_plugin.deepspeed_config[ "gradient_accumulation_steps" ] else: UpperCAmelCase_ = 1 UpperCAmelCase_ = (len(snake_case_ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): UpperCAmelCase_ = get_linear_schedule_with_warmup( optimizer=snake_case_ , num_warmup_steps=0 , num_training_steps=snake_case_ , ) else: UpperCAmelCase_ = DummyScheduler(snake_case_ , total_num_steps=snake_case_ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # We need to keep track of how many total steps we have iterated over UpperCAmelCase_ = 0 # We also need to keep track of the stating epoch so files are named properly UpperCAmelCase_ = 0 # Now we train the model UpperCAmelCase_ = evaluate.load("glue" , "mrpc" ) UpperCAmelCase_ = 0 UpperCAmelCase_ = {} for epoch in range(snake_case_ , snake_case_ ): model.train() for step, batch in enumerate(snake_case_ ): UpperCAmelCase_ = model(**snake_case_ ) UpperCAmelCase_ = outputs.loss UpperCAmelCase_ = loss / gradient_accumulation_steps accelerator.backward(snake_case_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() UpperCAmelCase_ = 0 for step, batch in enumerate(snake_case_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): UpperCAmelCase_ = model(**snake_case_ ) UpperCAmelCase_ = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather( (predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(snake_case_ ) - 1: UpperCAmelCase_ = predictions[: len(eval_dataloader.dataset ) - samples_seen] UpperCAmelCase_ = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=snake_case_ , references=snake_case_ , ) UpperCAmelCase_ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , snake_case_ ) UpperCAmelCase_ = eval_metric["accuracy"] if best_performance < eval_metric["accuracy"]: UpperCAmelCase_ = eval_metric["accuracy"] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}""" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f: json.dump(snake_case_ , snake_case_ ) def lowerCAmelCase_ ( ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." ) parser.add_argument( "--model_name_or_path" , type=snake_case_ , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=snake_case_ , ) parser.add_argument( "--output_dir" , type=snake_case_ , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , ) parser.add_argument( "--performance_lower_bound" , type=snake_case_ , default=snake_case_ , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , ) parser.add_argument( "--num_epochs" , type=snake_case_ , default=3 , help="Number of train epochs." , ) UpperCAmelCase_ = parser.parse_args() UpperCAmelCase_ = {"lr": 2E-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} training_function(snake_case_ , snake_case_ ) if __name__ == "__main__": main()
78
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
78
1
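The training loop in the DeepSpeed example script above divides each loss by the gradient-accumulation factor and steps the optimizer only once per group of micro-batches. A minimal, runnable sketch of that pattern on a toy model; all shapes and hyperparameters are illustrative, and the sketch steps on (step + 1) % accum_steps so the optimizer never fires before any gradient has accumulated:

import torch
from torch import nn

model = nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
accum_steps = 4
batches = [(torch.randn(8, 4), torch.randint(0, 2, (8,))) for _ in range(8)]

for step, (x, y) in enumerate(batches):
    # Scaling keeps the accumulated gradient equal to one large-batch gradient.
    loss = nn.functional.cross_entropy(model(x), y) / accum_steps
    loss.backward()
    if (step + 1) % accum_steps == 0:
        optimizer.step()
        optimizer.zero_grad()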
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE_: List[Any] =logging.get_logger(__name__) SCREAMING_SNAKE_CASE_: Union[str, Any] ={ 'BridgeTower/bridgetower-base': 'https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json', 'BridgeTower/bridgetower-base-itm-mlm': ( 'https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json' ), } class __A ( UpperCamelCase__ ): a__ : List[Any] = """bridgetower_vision_model""" def __init__(self : Optional[int] , __a : str=768 , __a : Dict=12 , __a : Optional[Any]=3 , __a : Dict=16 , __a : Dict=288 , __a : Union[str, Any]=1 , __a : Any=1E-05 , __a : Optional[Any]=False , __a : Optional[Any]=True , __a : Dict=False , **__a : Optional[int] , ): super().__init__(**__a ) UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_channels UpperCAmelCase_ = patch_size UpperCAmelCase_ = image_size UpperCAmelCase_ = initializer_factor UpperCAmelCase_ = layer_norm_eps UpperCAmelCase_ = stop_gradient UpperCAmelCase_ = share_layernorm UpperCAmelCase_ = remove_last_layer @classmethod def _lowercase (cls : List[str] , __a : Union[str, os.PathLike] , **__a : List[str] ): UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(__a , **__a ) if config_dict.get("model_type" ) == "bridgetower": UpperCAmelCase_ = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__a , **__a ) class __A ( UpperCamelCase__ ): a__ : Optional[Any] = """bridgetower_text_model""" def __init__(self : Tuple , __a : List[Any]=50265 , __a : int=768 , __a : int=12 , __a : str=12 , __a : Any=1 , __a : Dict=3072 , __a : int="gelu" , __a : Tuple=0.1 , __a : Optional[int]=0.1 , __a : Tuple=514 , __a : List[Any]=1 , __a : Tuple=1E-05 , __a : Any=1 , __a : Any=0 , __a : Dict=2 , __a : Optional[Any]="absolute" , __a : str=True , **__a : int , ): super().__init__(**__a ) UpperCAmelCase_ = vocab_size UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = hidden_act UpperCAmelCase_ = initializer_factor UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = max_position_embeddings UpperCAmelCase_ = type_vocab_size UpperCAmelCase_ = layer_norm_eps UpperCAmelCase_ = position_embedding_type UpperCAmelCase_ = use_cache UpperCAmelCase_ = pad_token_id UpperCAmelCase_ = bos_token_id UpperCAmelCase_ = eos_token_id @classmethod def _lowercase (cls : Optional[int] , __a : Union[str, os.PathLike] , **__a : List[str] ): UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(__a , **__a ) if config_dict.get("model_type" ) == "bridgetower": UpperCAmelCase_ = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__a , **__a ) class __A ( UpperCamelCase__ ): a__ : List[str] = """bridgetower""" def __init__(self : int , __a : Optional[Any]=True , __a : Optional[Any]="gelu" , __a : str=768 , __a : Optional[Any]=1 , __a : List[str]=1E-05 , __a : List[Any]=False , __a : int="add" , __a : Optional[int]=12 , __a : List[str]=6 , __a : Dict=False , __a : Optional[int]=False , __a : Optional[int]=None , __a : Tuple=None , **__a : str , ): # TODO: remove this once the Hub files are updated. UpperCAmelCase_ = kwargs.pop("text_config_dict" , __a ) UpperCAmelCase_ = kwargs.pop("vision_config_dict" , __a ) super().__init__(**__a ) UpperCAmelCase_ = share_cross_modal_transformer_layers UpperCAmelCase_ = hidden_act UpperCAmelCase_ = hidden_size UpperCAmelCase_ = initializer_factor UpperCAmelCase_ = layer_norm_eps UpperCAmelCase_ = share_link_tower_layers UpperCAmelCase_ = link_tower_type UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = tie_word_embeddings UpperCAmelCase_ = init_layernorm_from_vision_encoder if text_config is None: UpperCAmelCase_ = {} logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values." ) if vision_config is None: UpperCAmelCase_ = {} logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values." ) UpperCAmelCase_ = BridgeTowerTextConfig(**__a ) UpperCAmelCase_ = BridgeTowerVisionConfig(**__a ) @classmethod def _lowercase (cls : str , __a : BridgeTowerTextConfig , __a : BridgeTowerVisionConfig , **__a : Union[str, Any] ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__a ) def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ = copy.deepcopy(self.__dict__ ) UpperCAmelCase_ = self.text_config.to_dict() UpperCAmelCase_ = self.vision_config.to_dict() UpperCAmelCase_ = self.__class__.model_type return output
78
'''simple docstring''' import logging import math from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from .tensor_utils import tensor_tree_map, tree_map def lowerCAmelCase_ ( snake_case_ : Union[dict, list, tuple, torch.Tensor] ) -> List[Tuple[int, ...]]: '''simple docstring''' UpperCAmelCase_ = [] if isinstance(snake_case_ , snake_case_ ): for v in tree.values(): shapes.extend(_fetch_dims(snake_case_ ) ) elif isinstance(snake_case_ , (list, tuple) ): for t in tree: shapes.extend(_fetch_dims(snake_case_ ) ) elif isinstance(snake_case_ , torch.Tensor ): shapes.append(tree.shape ) else: raise ValueError("Not supported" ) return shapes @torch.jit.ignore def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Tuple[int, ...] ) -> Tuple[int, ...]: '''simple docstring''' UpperCAmelCase_ = [] for d in reversed(snake_case_ ): idx.append(flat_idx % d ) UpperCAmelCase_ = flat_idx // d return tuple(reversed(snake_case_ ) ) @torch.jit.ignore def lowerCAmelCase_ ( snake_case_ : Sequence[int] , snake_case_ : Sequence[int] , snake_case_ : Sequence[int] , snake_case_ : Optional[Sequence[bool]] = None , snake_case_ : Optional[Sequence[bool]] = None , ) -> List[Tuple[slice, ...]]: '''simple docstring''' def reduce_edge_list(snake_case_ : List[bool] ) -> None: UpperCAmelCase_ = True for i in range(len(snake_case_ ) ): UpperCAmelCase_ = -1 * (i + 1) l[reversed_idx] &= tally UpperCAmelCase_ = l[reversed_idx] if start_edges is None: UpperCAmelCase_ = [s == 0 for s in start] reduce_edge_list(snake_case_ ) if end_edges is None: UpperCAmelCase_ = [e == (d - 1) for e, d in zip(snake_case_ , snake_case_ )] reduce_edge_list(snake_case_ ) # Base cases. Either start/end are empty and we're done, or the final, # one-dimensional tensor can be simply sliced if len(snake_case_ ) == 0: return [()] elif len(snake_case_ ) == 1: return [(slice(start[0] , end[0] + 1 ),)] UpperCAmelCase_ = [] UpperCAmelCase_ = [] # Dimensions common to start and end can be selected directly for s, e in zip(snake_case_ , snake_case_ ): if s == e: path_list.append(slice(snake_case_ , s + 1 ) ) else: break UpperCAmelCase_ = tuple(snake_case_ ) UpperCAmelCase_ = len(snake_case_ ) # start == end, and we're done if divergence_idx == len(snake_case_ ): return [path] def upper() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None UpperCAmelCase_ = start[divergence_idx] return tuple( path + (slice(snake_case_ , sdi + 1 ),) + s for s in _get_minimal_slice_set( start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) ) def lower() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None UpperCAmelCase_ = end[divergence_idx] return tuple( path + (slice(snake_case_ , edi + 1 ),) + s for s in _get_minimal_slice_set( [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) ) # If both start and end are at the edges of the subtree rooted at # divergence_idx, we can just select the whole subtree at once if start_edges[divergence_idx] and end_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) ) # If just start is at the edge, we can grab almost all 
of the subtree, # treating only the ragged bottom edge as an edge case elif start_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) ) slices.extend(lower() ) # Analogous to the previous case, but the top is ragged this time elif end_edges[divergence_idx]: slices.extend(upper() ) slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) ) # If both sides of the range are ragged, we need to handle both sides # separately. If there's contiguous meat in between them, we can index it # in one big chunk else: slices.extend(upper() ) UpperCAmelCase_ = end[divergence_idx] - start[divergence_idx] if middle_ground > 1: slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) ) slices.extend(lower() ) return slices @torch.jit.ignore def lowerCAmelCase_ ( snake_case_ : torch.Tensor , snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> torch.Tensor: '''simple docstring''' UpperCAmelCase_ = t.shape[:no_batch_dims] UpperCAmelCase_ = list(_flat_idx_to_idx(snake_case_ , snake_case_ ) ) # _get_minimal_slice_set is inclusive UpperCAmelCase_ = list(_flat_idx_to_idx(flat_end - 1 , snake_case_ ) ) # Get an ordered list of slices to perform UpperCAmelCase_ = _get_minimal_slice_set( snake_case_ , snake_case_ , snake_case_ , ) UpperCAmelCase_ = [t[s] for s in slices] return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] ) def lowerCAmelCase_ ( snake_case_ : Callable , snake_case_ : Dict[str, Any] , snake_case_ : int , snake_case_ : int , snake_case_ : bool = False , snake_case_ : Any = None , snake_case_ : bool = False , ) -> Any: '''simple docstring''' if not (len(snake_case_ ) > 0): raise ValueError("Must provide at least one input" ) UpperCAmelCase_ = [shape[:no_batch_dims] for shape in _fetch_dims(snake_case_ )] UpperCAmelCase_ = tuple([max(snake_case_ ) for s in zip(*snake_case_ )] ) def _prep_inputs(snake_case_ : torch.Tensor ) -> torch.Tensor: if not low_mem: if not sum(t.shape[:no_batch_dims] ) == no_batch_dims: UpperCAmelCase_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) UpperCAmelCase_ = t.reshape(-1 , *t.shape[no_batch_dims:] ) else: UpperCAmelCase_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) return t UpperCAmelCase_ = tensor_tree_map(_prep_inputs , snake_case_ ) UpperCAmelCase_ = None if _out is not None: UpperCAmelCase_ = tensor_tree_map(lambda snake_case_ : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out ) UpperCAmelCase_ = 1 for d in orig_batch_dims: flat_batch_dim *= d UpperCAmelCase_ = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) def _select_chunk(snake_case_ : torch.Tensor ) -> torch.Tensor: return t[i : i + chunk_size] if t.shape[0] != 1 else t UpperCAmelCase_ = 0 UpperCAmelCase_ = prepped_outputs for _ in range(snake_case_ ): # Chunk the input if not low_mem: UpperCAmelCase_ = _select_chunk else: UpperCAmelCase_ = partial( _chunk_slice , flat_start=snake_case_ , flat_end=min(snake_case_ , i + chunk_size ) , no_batch_dims=len(snake_case_ ) , ) UpperCAmelCase_ = tensor_tree_map(snake_case_ , snake_case_ ) # Run the layer on the chunk UpperCAmelCase_ = layer(**snake_case_ ) # Allocate space for the output if out is None: UpperCAmelCase_ = tensor_tree_map(lambda snake_case_ : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , snake_case_ ) # Put the chunk in its pre-allocated space if isinstance(snake_case_ , snake_case_ ): def assign(snake_case_ : dict , snake_case_ : dict ) -> None: for k, v in da.items(): if 
isinstance(snake_case_ , snake_case_ ): assign(snake_case_ , da[k] ) else: if _add_into_out: v[i : i + chunk_size] += da[k] else: UpperCAmelCase_ = da[k] assign(snake_case_ , snake_case_ ) elif isinstance(snake_case_ , snake_case_ ): for xa, xa in zip(snake_case_ , snake_case_ ): if _add_into_out: xa[i : i + chunk_size] += xa else: UpperCAmelCase_ = xa elif isinstance(snake_case_ , torch.Tensor ): if _add_into_out: out[i : i + chunk_size] += output_chunk else: UpperCAmelCase_ = output_chunk else: raise ValueError("Not supported" ) i += chunk_size UpperCAmelCase_ = tensor_tree_map(lambda snake_case_ : t.view(orig_batch_dims + t.shape[1:] ) , snake_case_ ) return out class __A : def __init__(self : Dict , __a : int = 512 , ): UpperCAmelCase_ = max_chunk_size UpperCAmelCase_ = None UpperCAmelCase_ = None def _lowercase (self : List[Any] , __a : Callable , __a : tuple , __a : int ): logging.info("Tuning chunk size..." ) if min_chunk_size >= self.max_chunk_size: return min_chunk_size UpperCAmelCase_ = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )] UpperCAmelCase_ = [c for c in candidates if c > min_chunk_size] UpperCAmelCase_ = [min_chunk_size] + candidates candidates[-1] += 4 def test_chunk_size(__a : int ) -> bool: try: with torch.no_grad(): fn(*__a , chunk_size=__a ) return True except RuntimeError: return False UpperCAmelCase_ = 0 UpperCAmelCase_ = len(__a ) - 1 while i > min_viable_chunk_size_index: UpperCAmelCase_ = test_chunk_size(candidates[i] ) if not viable: UpperCAmelCase_ = (min_viable_chunk_size_index + i) // 2 else: UpperCAmelCase_ = i UpperCAmelCase_ = (i + len(__a ) - 1) // 2 return candidates[min_viable_chunk_size_index] def _lowercase (self : int , __a : Iterable , __a : Iterable ): UpperCAmelCase_ = True for aa, aa in zip(__a , __a ): assert type(__a ) == type(__a ) if isinstance(__a , (list, tuple) ): consistent &= self._compare_arg_caches(__a , __a ) elif isinstance(__a , __a ): UpperCAmelCase_ = [v for _, v in sorted(aa.items() , key=lambda __a : x[0] )] UpperCAmelCase_ = [v for _, v in sorted(aa.items() , key=lambda __a : x[0] )] consistent &= self._compare_arg_caches(__a , __a ) else: consistent &= aa == aa return consistent def _lowercase (self : List[str] , __a : Callable , __a : tuple , __a : int , ): UpperCAmelCase_ = True UpperCAmelCase_ = tree_map(lambda __a : a.shape if isinstance(__a , torch.Tensor ) else a , __a , __a ) if self.cached_arg_data is not None: # If args have changed shape/value, we need to re-tune assert len(self.cached_arg_data ) == len(__a ) UpperCAmelCase_ = self._compare_arg_caches(self.cached_arg_data , __a ) else: # Otherwise, we can reuse the precomputed value UpperCAmelCase_ = False if not consistent: UpperCAmelCase_ = self._determine_favorable_chunk_size( __a , __a , __a , ) UpperCAmelCase_ = arg_data assert self.cached_chunk_size is not None return self.cached_chunk_size
78
1
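The chunking utilities above repeatedly map a flat batch position back to a multi-dimensional index (_flat_idx_to_idx) before computing slice sets. A minimal sketch of that row-major conversion in plain Python, with an illustrative shape:

def flat_idx_to_idx(flat_idx: int, dims: tuple) -> tuple:
    # Peel off the fastest-varying (last) dimension first, then reverse.
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx //= d
    return tuple(reversed(idx))

assert flat_idx_to_idx(5, (2, 3)) == (1, 2)  # row-major: 1 * 3 + 2 == 5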
'''simple docstring'''
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Return True if the matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient (v* A v) / (v* v) for matrix a and vector v."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
78
'''simple docstring''' import copy import re class __A : a__ : Optional[int] = """hp""" a__ : Optional[Any] = {} a__ : List[Any] = None @classmethod def _lowercase (cls : Optional[int] , __a : str , __a : Tuple ): UpperCAmelCase_ = prefix UpperCAmelCase_ = defaults cls.build_naming_info() @staticmethod def _lowercase (__a : List[Any] , __a : List[str] ): if len(__a ) == 0: return "" UpperCAmelCase_ = None if any(char.isdigit() for char in word ): raise Exception(f"""Parameters should not contain numbers: '{word}' contains a number""" ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(__a ) + 1 ): UpperCAmelCase_ = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: UpperCAmelCase_ = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(__a : Union[str, Any] ): UpperCAmelCase_ = "" while integer != 0: UpperCAmelCase_ = chr(ord("A" ) + integer % 10 ) + s integer //= 10 return s UpperCAmelCase_ = 0 while True: UpperCAmelCase_ = word + "#" + int_to_alphabetic(__a ) if sword in info["reverse_short_word"]: continue else: UpperCAmelCase_ = sword break UpperCAmelCase_ = short_word UpperCAmelCase_ = word return short_word @staticmethod def _lowercase (__a : List[str] , __a : Union[str, Any] ): UpperCAmelCase_ = param_name.split("_" ) UpperCAmelCase_ = [TrialShortNamer.shortname_for_word(__a , __a ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name UpperCAmelCase_ = ["", "_"] for separator in separators: UpperCAmelCase_ = separator.join(__a ) if shortname not in info["reverse_short_param"]: UpperCAmelCase_ = shortname UpperCAmelCase_ = param_name return shortname return param_name @staticmethod def _lowercase (__a : int , __a : Union[str, Any] ): UpperCAmelCase_ = TrialShortNamer.shortname_for_key(__a , __a ) UpperCAmelCase_ = short_name UpperCAmelCase_ = param_name @classmethod def _lowercase (cls : Any ): if cls.NAMING_INFO is not None: return UpperCAmelCase_ = { "short_word": {}, "reverse_short_word": {}, "short_param": {}, "reverse_short_param": {}, } UpperCAmelCase_ = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(__a , __a ) UpperCAmelCase_ = info @classmethod def _lowercase (cls : int , __a : Optional[int] ): cls.build_naming_info() assert cls.PREFIX is not None UpperCAmelCase_ = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(f"""You should provide a default value for the param name {k} with value {v}""" ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue UpperCAmelCase_ = cls.NAMING_INFO["short_param"][k] if isinstance(__a , __a ): UpperCAmelCase_ = 1 if v else 0 UpperCAmelCase_ = "" if isinstance(__a , (int, float) ) else "-" UpperCAmelCase_ = f"""{key}{sep}{v}""" name.append(__a ) return "_".join(__a ) @classmethod def _lowercase (cls : Dict , __a : Dict ): UpperCAmelCase_ = repr[len(cls.PREFIX ) + 1 :] if repr == "": UpperCAmelCase_ = [] else: UpperCAmelCase_ = repr.split("_" ) UpperCAmelCase_ = {} for value in values: if "-" in value: UpperCAmelCase_ , UpperCAmelCase_ = value.split("-" ) else: UpperCAmelCase_ = re.sub("[0-9.]" , "" , __a ) UpperCAmelCase_ = float(re.sub("[^0-9.]" , "" , __a ) ) UpperCAmelCase_ = cls.NAMING_INFO["reverse_short_param"][p_k] UpperCAmelCase_ = p_v for k in cls.DEFAULTS: if k not in parameters: UpperCAmelCase_ = cls.DEFAULTS[k] return parameters
78
1
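As a quick numeric check of the Rayleigh quotient script a little above: for a Hermitian matrix the quotient (v* A v) / (v* v) is real, and it equals the eigenvalue whenever v is an eigenvector. A minimal sketch with illustrative values:

import numpy as np

a = np.array([[2.0, 0.0], [0.0, 5.0]])  # Hermitian (real symmetric)
v = np.array([[0.0], [1.0]])            # eigenvector for eigenvalue 5
r = (v.conj().T @ a @ v) / (v.conj().T @ v)
print(r.item())  # 5.0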
'''simple docstring''' from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging SCREAMING_SNAKE_CASE_: int =logging.get_logger(__name__) class __A ( UpperCamelCase__ ): a__ : Tuple = ["""pixel_values"""] def __init__(self : int , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : int = 8 , **__a : int , ): super().__init__(**__a ) UpperCAmelCase_ = do_rescale UpperCAmelCase_ = rescale_factor UpperCAmelCase_ = do_pad UpperCAmelCase_ = pad_size def _lowercase (self : Optional[int] , __a : np.ndarray , __a : float , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[int] ): return rescale(__a , scale=__a , data_format=__a , **__a ) def _lowercase (self : Optional[int] , __a : np.ndarray , __a : int , __a : Optional[Union[str, ChannelDimension]] = None ): UpperCAmelCase_ , UpperCAmelCase_ = get_image_size(__a ) UpperCAmelCase_ = (old_height // size + 1) * size - old_height UpperCAmelCase_ = (old_width // size + 1) * size - old_width return pad(__a , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=__a ) def _lowercase (self : Tuple , __a : ImageInput , __a : Optional[bool] = None , __a : Optional[float] = None , __a : Optional[bool] = None , __a : Optional[int] = None , __a : Optional[Union[str, TensorType]] = None , __a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__a : List[str] , ): UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase_ = do_pad if do_pad is not None else self.do_pad UpperCAmelCase_ = pad_size if pad_size is not None else self.pad_size UpperCAmelCase_ = make_list_of_images(__a ) if not valid_images(__a ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) # All transformations expect numpy arrays. UpperCAmelCase_ = [to_numpy_array(__a ) for image in images] if do_rescale: UpperCAmelCase_ = [self.rescale(image=__a , scale=__a ) for image in images] if do_pad: UpperCAmelCase_ = [self.pad(__a , size=__a ) for image in images] UpperCAmelCase_ = [to_channel_dimension_format(__a , __a ) for image in images] UpperCAmelCase_ = {"pixel_values": images} return BatchFeature(data=__a , tensor_type=__a )
78
1
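The image processor above pads each spatial side up to the next multiple of pad_size with symmetric (mirror) padding; note that (old // size + 1) * size - old always adds at least one row or column, even when a side is already an exact multiple of size. A minimal numpy sketch of the same computation:

import numpy as np

def pad_to_multiple(image: np.ndarray, size: int = 8) -> np.ndarray:
    old_h, old_w = image.shape[:2]
    pad_h = (old_h // size + 1) * size - old_h
    pad_w = (old_w // size + 1) * size - old_w
    # Mirror-pad on the bottom/right edges only, matching the processor above.
    return np.pad(image, ((0, pad_h), (0, pad_w)), mode="symmetric")

print(pad_to_multiple(np.zeros((10, 13))).shape)  # (16, 16)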
'''simple docstring''' import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> List[Any]: '''simple docstring''' if "img_encoder.pos_embed" in name: UpperCAmelCase_ = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" ) if "img_encoder.patch_embed.proj" in name: UpperCAmelCase_ = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" ) if "img_encoder.patch_embed.norm" in name: UpperCAmelCase_ = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" ) if "img_encoder.layers" in name: UpperCAmelCase_ = name.replace("img_encoder.layers" , "vision_model.encoder.stages" ) if "blocks" in name and "res" not in name: UpperCAmelCase_ = name.replace("blocks" , "layers" ) if "attn" in name and "pre_assign" not in name: UpperCAmelCase_ = name.replace("attn" , "self_attn" ) if "proj" in name and "self_attn" in name and "text" not in name: UpperCAmelCase_ = name.replace("proj" , "out_proj" ) if "pre_assign_attn.attn.proj" in name: UpperCAmelCase_ = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" ) if "norm1" in name: UpperCAmelCase_ = name.replace("norm1" , "layer_norm1" ) if "norm2" in name and "pre_assign" not in name: UpperCAmelCase_ = name.replace("norm2" , "layer_norm2" ) if "img_encoder.norm" in name: UpperCAmelCase_ = name.replace("img_encoder.norm" , "vision_model.layernorm" ) # text encoder if "text_encoder.token_embedding" in name: UpperCAmelCase_ = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" ) if "text_encoder.positional_embedding" in name: UpperCAmelCase_ = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" ) if "text_encoder.transformer.resblocks." in name: UpperCAmelCase_ = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." ) if "ln_1" in name: UpperCAmelCase_ = name.replace("ln_1" , "layer_norm1" ) if "ln_2" in name: UpperCAmelCase_ = name.replace("ln_2" , "layer_norm2" ) if "c_fc" in name: UpperCAmelCase_ = name.replace("c_fc" , "fc1" ) if "c_proj" in name: UpperCAmelCase_ = name.replace("c_proj" , "fc2" ) if "text_encoder" in name: UpperCAmelCase_ = name.replace("text_encoder" , "text_model" ) if "ln_final" in name: UpperCAmelCase_ = name.replace("ln_final" , "final_layer_norm" ) # projection layers if "img_projector.linear_hidden." in name: UpperCAmelCase_ = name.replace("img_projector.linear_hidden." , "visual_projection." ) if "img_projector.linear_out." in name: UpperCAmelCase_ = name.replace("img_projector.linear_out." , "visual_projection.3." ) if "text_projector.linear_hidden" in name: UpperCAmelCase_ = name.replace("text_projector.linear_hidden" , "text_projection" ) if "text_projector.linear_out" in name: UpperCAmelCase_ = name.replace("text_projector.linear_out" , "text_projection.3" ) return name def lowerCAmelCase_ ( snake_case_ : List[str] , snake_case_ : List[str] ) -> int: '''simple docstring''' for key in orig_state_dict.copy().keys(): UpperCAmelCase_ = orig_state_dict.pop(snake_case_ ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors UpperCAmelCase_ = key.split("." 
) UpperCAmelCase_ , UpperCAmelCase_ = int(key_split[2] ), int(key_split[4] ) UpperCAmelCase_ = config.vision_config.hidden_size if "weight" in key: UpperCAmelCase_ = val[:dim, :] UpperCAmelCase_ = val[dim : dim * 2, :] UpperCAmelCase_ = val[-dim:, :] else: UpperCAmelCase_ = val[:dim] UpperCAmelCase_ = val[dim : dim * 2] UpperCAmelCase_ = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors UpperCAmelCase_ = key.split("." ) UpperCAmelCase_ = int(key_split[3] ) UpperCAmelCase_ = config.text_config.hidden_size if "weight" in key: UpperCAmelCase_ = val[:dim, :] UpperCAmelCase_ = val[ dim : dim * 2, : ] UpperCAmelCase_ = val[-dim:, :] else: UpperCAmelCase_ = val[:dim] UpperCAmelCase_ = val[dim : dim * 2] UpperCAmelCase_ = val[-dim:] else: UpperCAmelCase_ = rename_key(snake_case_ ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): UpperCAmelCase_ = val.squeeze_() else: UpperCAmelCase_ = val return orig_state_dict def lowerCAmelCase_ ( ) -> List[Any]: '''simple docstring''' UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : Any , snake_case_ : Dict="groupvit-gcc-yfcc" , snake_case_ : Any=False ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase_ = GroupViTConfig() UpperCAmelCase_ = GroupViTModel(snake_case_ ).eval() UpperCAmelCase_ = torch.load(snake_case_ , map_location="cpu" )["model"] UpperCAmelCase_ = convert_state_dict(snake_case_ , snake_case_ ) UpperCAmelCase_ , UpperCAmelCase_ = model.load_state_dict(snake_case_ , strict=snake_case_ ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(snake_case_ ) == 0) # verify result UpperCAmelCase_ = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" ) UpperCAmelCase_ = prepare_img() UpperCAmelCase_ = processor(text=["a photo of a cat", "a photo of a dog"] , images=snake_case_ , padding=snake_case_ , return_tensors="pt" ) with torch.no_grad(): UpperCAmelCase_ = model(**snake_case_ ) if model_name == "groupvit-gcc-yfcc": UpperCAmelCase_ = torch.tensor([[13.3523, 6.3629]] ) elif model_name == "groupvit-gcc-redcaps": UpperCAmelCase_ = torch.tensor([[16.1873, 8.6230]] ) else: raise ValueError(f"""Model name {model_name} not supported.""" ) assert torch.allclose(outputs.logits_per_image , snake_case_ , atol=1E-3 ) processor.save_pretrained(snake_case_ ) model.save_pretrained(snake_case_ ) print("Successfully saved processor and model to" , snake_case_ ) if push_to_hub: print("Pushing to the hub..." ) processor.push_to_hub(snake_case_ , organization="nielsr" ) model.push_to_hub(snake_case_ , organization="nielsr" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_: Union[str, Any] =argparse.ArgumentParser() parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.' ) parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint') parser.add_argument( '--model_name', default='groupvit-gccy-fcc', type=str, help='Name of the model. 
Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'', ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.', ) args = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
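The conversion above is driven by an ordered rename table: each `if ... in name` clause rewrites one substring, and later clauses can act on the output of earlier ones, so rule order matters. A minimal, self-contained sketch of that pattern (the two rules and the sample key are illustrative, not the script's full table):

RULES = [
    ("img_encoder.layers", "vision_model.encoder.stages"),
    ("blocks", "layers"),
]

def rename(name: str) -> str:
    # apply rules in order; a later rule can rewrite the output of an earlier one
    for old, new in RULES:
        if old in name:
            name = name.replace(old, new)
    return name

assert rename("img_encoder.layers.0.blocks.1.norm1.weight") == (
    "vision_model.encoder.stages.0.layers.1.norm1.weight"
)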
78
'''simple docstring''' import argparse import os.path as osp import re import torch from safetensors.torch import load_file, save_file # =================# # UNet Conversion # # =================# SCREAMING_SNAKE_CASE_: Dict =[ # (stable-diffusion, HF Diffusers) ('time_embed.0.weight', 'time_embedding.linear_1.weight'), ('time_embed.0.bias', 'time_embedding.linear_1.bias'), ('time_embed.2.weight', 'time_embedding.linear_2.weight'), ('time_embed.2.bias', 'time_embedding.linear_2.bias'), ('input_blocks.0.0.weight', 'conv_in.weight'), ('input_blocks.0.0.bias', 'conv_in.bias'), ('out.0.weight', 'conv_norm_out.weight'), ('out.0.bias', 'conv_norm_out.bias'), ('out.2.weight', 'conv_out.weight'), ('out.2.bias', 'conv_out.bias'), ] SCREAMING_SNAKE_CASE_: List[Any] =[ # (stable-diffusion, HF Diffusers) ('in_layers.0', 'norm1'), ('in_layers.2', 'conv1'), ('out_layers.0', 'norm2'), ('out_layers.3', 'conv2'), ('emb_layers.1', 'time_emb_proj'), ('skip_connection', 'conv_shortcut'), ] SCREAMING_SNAKE_CASE_: Union[str, Any] =[] # hardcoded number of downblocks and resnets/attentions... # would need smarter logic for other networks. for i in range(4): # loop over downblocks/upblocks for j in range(2): # loop over resnets/attentions for downblocks SCREAMING_SNAKE_CASE_: Any =f"down_blocks.{i}.resnets.{j}." SCREAMING_SNAKE_CASE_: Tuple =f"input_blocks.{3*i + j + 1}.0." unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix)) if i < 3: # no attention layers in down_blocks.3 SCREAMING_SNAKE_CASE_: Optional[Any] =f"down_blocks.{i}.attentions.{j}." SCREAMING_SNAKE_CASE_: List[str] =f"input_blocks.{3*i + j + 1}.1." unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix)) for j in range(3): # loop over resnets/attentions for upblocks SCREAMING_SNAKE_CASE_: Union[str, Any] =f"up_blocks.{i}.resnets.{j}." SCREAMING_SNAKE_CASE_: Any =f"output_blocks.{3*i + j}.0." unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix)) if i > 0: # no attention layers in up_blocks.0 SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.attentions.{j}." SCREAMING_SNAKE_CASE_: Optional[int] =f"output_blocks.{3*i + j}.1." unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix)) if i < 3: # no downsample in down_blocks.3 SCREAMING_SNAKE_CASE_: Union[str, Any] =f"down_blocks.{i}.downsamplers.0.conv." SCREAMING_SNAKE_CASE_: Union[str, Any] =f"input_blocks.{3*(i+1)}.0.op." unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix)) # no upsample in up_blocks.3 SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.upsamplers.0." SCREAMING_SNAKE_CASE_: List[Any] =f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}." unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix)) SCREAMING_SNAKE_CASE_: int ='mid_block.attentions.0.' SCREAMING_SNAKE_CASE_: List[Any] ='middle_block.1.' unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) for j in range(2): SCREAMING_SNAKE_CASE_: Tuple =f"mid_block.resnets.{j}." SCREAMING_SNAKE_CASE_: Tuple =f"middle_block.{2*j}." 
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix)) def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = {k: k for k in unet_state_dict.keys()} for sd_name, hf_name in unet_conversion_map: UpperCAmelCase_ = sd_name for k, v in mapping.items(): if "resnets" in k: for sd_part, hf_part in unet_conversion_map_resnet: UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ ) UpperCAmelCase_ = v for k, v in mapping.items(): for sd_part, hf_part in unet_conversion_map_layer: UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ ) UpperCAmelCase_ = v UpperCAmelCase_ = {v: unet_state_dict[k] for k, v in mapping.items()} return new_state_dict # ================# # VAE Conversion # # ================# SCREAMING_SNAKE_CASE_: int =[ # (stable-diffusion, HF Diffusers) ('nin_shortcut', 'conv_shortcut'), ('norm_out', 'conv_norm_out'), ('mid.attn_1.', 'mid_block.attentions.0.'), ] for i in range(4): # down_blocks have two resnets for j in range(2): SCREAMING_SNAKE_CASE_: Tuple =f"encoder.down_blocks.{i}.resnets.{j}." SCREAMING_SNAKE_CASE_: int =f"encoder.down.{i}.block.{j}." vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) if i < 3: SCREAMING_SNAKE_CASE_: int =f"down_blocks.{i}.downsamplers.0." SCREAMING_SNAKE_CASE_: str =f"down.{i}.downsample." vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix)) SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.upsamplers.0." SCREAMING_SNAKE_CASE_: List[str] =f"up.{3-i}.upsample." vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) # up_blocks have three resnets # also, up blocks in hf are numbered in reverse from sd for j in range(3): SCREAMING_SNAKE_CASE_: List[str] =f"decoder.up_blocks.{i}.resnets.{j}." SCREAMING_SNAKE_CASE_: Dict =f"decoder.up.{3-i}.block.{j}." vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) # this part accounts for mid blocks in both the encoder and the decoder for i in range(2): SCREAMING_SNAKE_CASE_: Any =f"mid_block.resnets.{i}." SCREAMING_SNAKE_CASE_: Tuple =f"mid.block_{i+1}." 
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) SCREAMING_SNAKE_CASE_: int =[ # (stable-diffusion, HF Diffusers) ('norm.', 'group_norm.'), ('q.', 'query.'), ('k.', 'key.'), ('v.', 'value.'), ('proj_out.', 'proj_attn.'), ] def lowerCAmelCase_ ( snake_case_ : Tuple ) -> Tuple: '''simple docstring''' return w.reshape(*w.shape , 1 , 1 ) def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ = {k: k for k in vae_state_dict.keys()} for k, v in mapping.items(): for sd_part, hf_part in vae_conversion_map: UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ ) UpperCAmelCase_ = v for k, v in mapping.items(): if "attentions" in k: for sd_part, hf_part in vae_conversion_map_attn: UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ ) UpperCAmelCase_ = v UpperCAmelCase_ = {v: vae_state_dict[k] for k, v in mapping.items()} UpperCAmelCase_ = ["q", "k", "v", "proj_out"] for k, v in new_state_dict.items(): for weight_name in weights_to_convert: if f"""mid.attn_1.{weight_name}.weight""" in k: print(f"""Reshaping {k} for SD format""" ) UpperCAmelCase_ = reshape_weight_for_sd(snake_case_ ) return new_state_dict # =========================# # Text Encoder Conversion # # =========================# SCREAMING_SNAKE_CASE_: List[Any] =[ # (stable-diffusion, HF Diffusers) ('resblocks.', 'text_model.encoder.layers.'), ('ln_1', 'layer_norm1'), ('ln_2', 'layer_norm2'), ('.c_fc.', '.fc1.'), ('.c_proj.', '.fc2.'), ('.attn', '.self_attn'), ('ln_final.', 'transformer.text_model.final_layer_norm.'), ('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'), ('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'), ] SCREAMING_SNAKE_CASE_: Dict ={re.escape(x[1]): x[0] for x in textenc_conversion_lst} SCREAMING_SNAKE_CASE_: str =re.compile('|'.join(protected.keys())) # Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp SCREAMING_SNAKE_CASE_: List[Any] ={'q': 0, 'k': 1, 'v': 2} def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Tuple: '''simple docstring''' UpperCAmelCase_ = {} UpperCAmelCase_ = {} UpperCAmelCase_ = {} for k, v in text_enc_dict.items(): if ( k.endswith(".self_attn.q_proj.weight" ) or k.endswith(".self_attn.k_proj.weight" ) or k.endswith(".self_attn.v_proj.weight" ) ): UpperCAmelCase_ = k[: -len(".q_proj.weight" )] UpperCAmelCase_ = k[-len("q_proj.weight" )] if k_pre not in capture_qkv_weight: UpperCAmelCase_ = [None, None, None] UpperCAmelCase_ = v continue if ( k.endswith(".self_attn.q_proj.bias" ) or k.endswith(".self_attn.k_proj.bias" ) or k.endswith(".self_attn.v_proj.bias" ) ): UpperCAmelCase_ = k[: -len(".q_proj.bias" )] UpperCAmelCase_ = k[-len("q_proj.bias" )] if k_pre not in capture_qkv_bias: UpperCAmelCase_ = [None, None, None] UpperCAmelCase_ = v continue UpperCAmelCase_ = textenc_pattern.sub(lambda snake_case_ : protected[re.escape(m.group(0 ) )] , snake_case_ ) UpperCAmelCase_ = v for k_pre, tensors in capture_qkv_weight.items(): if None in tensors: raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" ) UpperCAmelCase_ = textenc_pattern.sub(lambda snake_case_ : protected[re.escape(m.group(0 ) )] , snake_case_ ) UpperCAmelCase_ = torch.cat(snake_case_ ) for k_pre, tensors in capture_qkv_bias.items(): if None in tensors: raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" ) UpperCAmelCase_ = textenc_pattern.sub(lambda snake_case_ : 
protected[re.escape(m.group(0 ) )] , snake_case_ ) UpperCAmelCase_ = torch.cat(snake_case_ ) return new_state_dict def lowerCAmelCase_ ( snake_case_ : List[Any] ) -> Union[str, Any]: '''simple docstring''' return text_enc_dict if __name__ == "__main__": SCREAMING_SNAKE_CASE_: str =argparse.ArgumentParser() parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.') parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--half', action='store_true', help='Save weights in half precision.') parser.add_argument( '--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.' ) SCREAMING_SNAKE_CASE_: Dict =parser.parse_args() assert args.model_path is not None, "Must provide a model path!" assert args.checkpoint_path is not None, "Must provide a checkpoint path!" # Path for safetensors SCREAMING_SNAKE_CASE_: Any =osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors') SCREAMING_SNAKE_CASE_: Dict =osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors') SCREAMING_SNAKE_CASE_: Union[str, Any] =osp.join(args.model_path, 'text_encoder', 'model.safetensors') # Load models from safetensors if it exists, if it doesn't pytorch if osp.exists(unet_path): SCREAMING_SNAKE_CASE_: Union[str, Any] =load_file(unet_path, device='cpu') else: SCREAMING_SNAKE_CASE_: int =osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin') SCREAMING_SNAKE_CASE_: Dict =torch.load(unet_path, map_location='cpu') if osp.exists(vae_path): SCREAMING_SNAKE_CASE_: Tuple =load_file(vae_path, device='cpu') else: SCREAMING_SNAKE_CASE_: List[Any] =osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin') SCREAMING_SNAKE_CASE_: str =torch.load(vae_path, map_location='cpu') if osp.exists(text_enc_path): SCREAMING_SNAKE_CASE_: Tuple =load_file(text_enc_path, device='cpu') else: SCREAMING_SNAKE_CASE_: List[Any] =osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin') SCREAMING_SNAKE_CASE_: Any =torch.load(text_enc_path, map_location='cpu') # Convert the UNet model SCREAMING_SNAKE_CASE_: List[Any] =convert_unet_state_dict(unet_state_dict) SCREAMING_SNAKE_CASE_: Any ={'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()} # Convert the VAE model SCREAMING_SNAKE_CASE_: List[Any] =convert_vae_state_dict(vae_state_dict) SCREAMING_SNAKE_CASE_: Dict ={'first_stage_model.' + k: v for k, v in vae_state_dict.items()} # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper SCREAMING_SNAKE_CASE_: Dict ='text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict if is_vaa_model: # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm SCREAMING_SNAKE_CASE_: Any ={'transformer.' + k: v for k, v in text_enc_dict.items()} SCREAMING_SNAKE_CASE_: str =convert_text_enc_state_dict_vaa(text_enc_dict) SCREAMING_SNAKE_CASE_: int ={'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()} else: SCREAMING_SNAKE_CASE_: str =convert_text_enc_state_dict(text_enc_dict) SCREAMING_SNAKE_CASE_: Optional[int] ={'cond_stage_model.transformer.' 
+ k: v for k, v in text_enc_dict.items()} # Put together new checkpoint state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict} if args.half: state_dict = {k: v.half() for k, v in state_dict.items()} if args.use_safetensors: save_file(state_dict, args.checkpoint_path) else: state_dict = {'state_dict': state_dict} torch.save(state_dict, args.checkpoint_path)
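Both the vision and text branches above hinge on (un)fusing attention projections: the Diffusers layout stores separate q/k/v matrices, while the original checkpoint keeps one fused `in_proj` block in q, k, v order. A shape-only sketch of the re-fusion step (the sizes are illustrative):

import torch

hidden = 8
q_w = torch.randn(hidden, hidden)
k_w = torch.randn(hidden, hidden)
v_w = torch.randn(hidden, hidden)
# order must be q, then k, then v, mirroring how the split above slices
# val[:dim], val[dim : dim * 2], val[-dim:]
in_proj_weight = torch.cat([q_w, k_w, v_w], dim=0)
assert in_proj_weight.shape == (3 * hidden, hidden)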
78
1
'''simple docstring''' import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class __A : def __init__(self : str , __a : Any , __a : str=13 , __a : List[str]=2 , __a : Any=24 , __a : Any=16 , __a : List[str]=True , __a : Tuple=True , __a : Any=32 , __a : str=5 , __a : Optional[int]=4 , __a : List[Any]=37 , __a : Optional[Any]="gelu" , __a : str=0.1 , __a : Tuple=0.1 , __a : Tuple=10 , __a : Any=0.02 , __a : Any=None , __a : Union[str, Any]=2 , __a : Tuple=2 , ): UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = patch_size UpperCAmelCase_ = max_length UpperCAmelCase_ = num_mel_bins UpperCAmelCase_ = is_training UpperCAmelCase_ = use_labels UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_act UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = type_sequence_label_size UpperCAmelCase_ = initializer_range UpperCAmelCase_ = scope UpperCAmelCase_ = frequency_stride UpperCAmelCase_ = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) UpperCAmelCase_ = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 UpperCAmelCase_ = (self.max_length - self.patch_size) // self.time_stride + 1 UpperCAmelCase_ = frequency_out_dimension * time_out_dimension UpperCAmelCase_ = num_patches + 2 def _lowercase (self : List[str] ): UpperCAmelCase_ = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] ) UpperCAmelCase_ = None if self.use_labels: UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ = self.get_config() return config, input_values, labels def _lowercase (self : Optional[int] ): return ASTConfig( patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , ) def _lowercase (self : str , __a : Any , __a : str , __a : str ): UpperCAmelCase_ = ASTModel(config=__a ) model.to(__a ) model.eval() UpperCAmelCase_ = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase (self : Dict ): UpperCAmelCase_ = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) 
, ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) = config_and_inputs UpperCAmelCase_ = {"input_values": input_values} return config, inputs_dict @require_torch class __A ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): a__ : Optional[int] = ( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) a__ : int = ( {"""audio-classification""": ASTForAudioClassification, """feature-extraction""": ASTModel} if is_torch_available() else {} ) a__ : int = False a__ : Union[str, Any] = False a__ : List[str] = False a__ : Any = False def _lowercase (self : Optional[Any] , __a : str , __a : List[Any] , __a : Optional[Any] , __a : str , __a : str ): if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = ASTModelTester(self ) UpperCAmelCase_ = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 ) def _lowercase (self : List[str] ): self.config_tester.run_common_tests() @unittest.skip(reason="AST does not use inputs_embeds" ) def _lowercase (self : List[str] ): pass def _lowercase (self : Optional[Any] ): UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ = model_class(__a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__a , nn.Linear ) ) def _lowercase (self : Dict ): UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ = model_class(__a ) UpperCAmelCase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ = [*signature.parameters.keys()] UpperCAmelCase_ = ["input_values"] self.assertListEqual(arg_names[:1] , __a ) def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) @slow def _lowercase (self : List[str] ): for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ = ASTModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def lowerCAmelCase_ ( ) -> Optional[int]: '''simple docstring''' UpperCAmelCase_ = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" ) UpperCAmelCase_ , UpperCAmelCase_ = torchaudio.load(snake_case_ ) return audio, sampling_rate @require_torch @require_torchaudio class __A ( unittest.TestCase ): @cached_property def _lowercase (self : str ): return ( ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ) if is_torchaudio_available() else None ) @slow def _lowercase (self : Optional[int] ): UpperCAmelCase_ = self.default_feature_extractor UpperCAmelCase_ = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ).to(__a ) UpperCAmelCase_ = self.default_feature_extractor UpperCAmelCase_ , UpperCAmelCase_ = prepare_audio() UpperCAmelCase_ = audio.squeeze().numpy() UpperCAmelCase_ = feature_extractor(__a , sampling_rate=__a , return_tensors="pt" ).to(__a ) # forward pass with torch.no_grad(): UpperCAmelCase_ = model(**__a ) # verify the logits UpperCAmelCase_ = torch.Size((1, 527) ) self.assertEqual(outputs.logits.shape , __a ) UpperCAmelCase_ = torch.tensor([-0.87_60, -7.00_42, -8.66_02] 
).to(__a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
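The tester's `seq_length` is pure patch arithmetic: a spectrogram of `max_length` frames by `num_mel_bins` bins is cut into `patch_size` patches with the two strides, plus two extra tokens. Recomputing it with the tester defaults above:

patch_size, max_length, num_mel_bins = 2, 24, 16
frequency_stride = time_stride = 2
freq_out = (num_mel_bins - patch_size) // frequency_stride + 1   # 8
time_out = (max_length - patch_size) // time_stride + 1          # 12
seq_length = freq_out * time_out + 2  # +2 for the [CLS] and distillation tokens
assert seq_length == 98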
78
'''simple docstring'''
from __future__ import annotations

import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize


def norm_squared(vector: ndarray) -> float:
    '''simple docstring'''
    return np.dot(vector, vector)


class SVC:
    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"""Unknown kernel: {kernel}"""
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]
        ).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
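A hedged usage sketch on a toy, linearly separable problem (requires numpy and scipy; the points and labels are illustrative):

xs = [
    np.asarray([0.0, 1.0]),
    np.asarray([0.0, 2.0]),
    np.asarray([0.0, -1.0]),
    np.asarray([0.0, -2.0]),
]
ys = np.asarray([1, 1, -1, -1])
svc = SVC(kernel="linear")
svc.fit(xs, ys)
assert svc.predict(np.asarray([0.0, 3.0])) == 1
assert svc.predict(np.asarray([0.0, -3.0])) == -1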
78
1
'''simple docstring'''
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P

# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    '''simple docstring'''
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    # return True if the regexes in qts match any contiguous window of strings in ks
    for i in range(len(ks) - len(qts) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    '''simple docstring'''

    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    '''simple docstring'''
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    '''simple docstring'''
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
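A quick demo of the window matching that `set_partitions` relies on: a rule tuple matches a flattened parameter key when its regexes line up with some contiguous window of the key (the sample keys are illustrative):

assert _match(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "mlp", "c_fc", "kernel"))
assert not _match(("attention", "out_proj", "bias"), ("mlp", "c_fc", "bias"))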
78
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import FeatureExtractionMixin from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType, logging SCREAMING_SNAKE_CASE_: Optional[Any] =logging.get_logger(__name__) SCREAMING_SNAKE_CASE_: List[Any] ={ 'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json', # See all Perceiver models at https://huggingface.co/models?filter=perceiver } class __A ( UpperCamelCase__ ): a__ : List[Any] = """perceiver""" def __init__(self : Optional[int] , __a : Tuple=256 , __a : Optional[Any]=1280 , __a : Optional[int]=768 , __a : Any=1 , __a : List[str]=26 , __a : Dict=8 , __a : List[Any]=8 , __a : Tuple=None , __a : List[str]=None , __a : Optional[int]="kv" , __a : Union[str, Any]=1 , __a : List[str]=1 , __a : List[Any]="gelu" , __a : List[str]=0.1 , __a : str=0.02 , __a : List[str]=1E-12 , __a : Optional[int]=True , __a : Tuple=262 , __a : Dict=2048 , __a : int=56 , __a : Optional[int]=[368, 496] , __a : Any=16 , __a : Optional[Any]=1920 , __a : Any=16 , __a : str=[1, 16, 224, 224] , **__a : Any , ): super().__init__(**__a ) UpperCAmelCase_ = num_latents UpperCAmelCase_ = d_latents UpperCAmelCase_ = d_model UpperCAmelCase_ = num_blocks UpperCAmelCase_ = num_self_attends_per_block UpperCAmelCase_ = num_self_attention_heads UpperCAmelCase_ = num_cross_attention_heads UpperCAmelCase_ = qk_channels UpperCAmelCase_ = v_channels UpperCAmelCase_ = cross_attention_shape_for_attention UpperCAmelCase_ = self_attention_widening_factor UpperCAmelCase_ = cross_attention_widening_factor UpperCAmelCase_ = hidden_act UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = initializer_range UpperCAmelCase_ = layer_norm_eps UpperCAmelCase_ = use_query_residual # masked language modeling attributes UpperCAmelCase_ = vocab_size UpperCAmelCase_ = max_position_embeddings # image classification attributes UpperCAmelCase_ = image_size # flow attributes UpperCAmelCase_ = train_size # multimodal autoencoding attributes UpperCAmelCase_ = num_frames UpperCAmelCase_ = audio_samples_per_frame UpperCAmelCase_ = samples_per_patch UpperCAmelCase_ = output_shape class __A ( UpperCamelCase__ ): @property def _lowercase (self : Dict ): if self.task == "multiple-choice": UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"} else: UpperCAmelCase_ = {0: "batch", 1: "sequence"} return OrderedDict( [ ("inputs", dynamic_axis), ("attention_mask", dynamic_axis), ] ) @property def _lowercase (self : Optional[Any] ): return 1E-4 def _lowercase (self : Union[str, Any] , __a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __a : int = -1 , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional[TensorType] = None , __a : int = 3 , __a : int = 40 , __a : int = 40 , ): # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified if isinstance(__a , __a ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX UpperCAmelCase_ = compute_effective_axis_dimension( __a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX UpperCAmelCase_ = 
preprocessor.num_special_tokens_to_add(__a ) UpperCAmelCase_ = compute_effective_axis_dimension( __a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__a ) # Generate dummy inputs according to compute batch and sequence UpperCAmelCase_ = [" ".join(["a"] ) * seq_length] * batch_size UpperCAmelCase_ = dict(preprocessor(__a , return_tensors=__a ) ) UpperCAmelCase_ = inputs.pop("input_ids" ) return inputs elif isinstance(__a , __a ) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX UpperCAmelCase_ = compute_effective_axis_dimension(__a , fixed_dimension=OnnxConfig.default_fixed_batch ) UpperCAmelCase_ = self._generate_dummy_images(__a , __a , __a , __a ) UpperCAmelCase_ = dict(preprocessor(images=__a , return_tensors=__a ) ) UpperCAmelCase_ = inputs.pop("pixel_values" ) return inputs else: raise ValueError( "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
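A hedged usage sketch (requires `transformers`; the sizes are illustrative): the config exposes the latent bottleneck that the ONNX dummy-input generation above has to respect.

from transformers import PerceiverConfig

config = PerceiverConfig(num_latents=256, d_latents=1280)
assert (config.num_latents, config.d_latents) == (256, 1280)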
78
1
'''simple docstring''' import logging import math from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from .tensor_utils import tensor_tree_map, tree_map def lowerCAmelCase_ ( snake_case_ : Union[dict, list, tuple, torch.Tensor] ) -> List[Tuple[int, ...]]: '''simple docstring''' UpperCAmelCase_ = [] if isinstance(snake_case_ , snake_case_ ): for v in tree.values(): shapes.extend(_fetch_dims(snake_case_ ) ) elif isinstance(snake_case_ , (list, tuple) ): for t in tree: shapes.extend(_fetch_dims(snake_case_ ) ) elif isinstance(snake_case_ , torch.Tensor ): shapes.append(tree.shape ) else: raise ValueError("Not supported" ) return shapes @torch.jit.ignore def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Tuple[int, ...] ) -> Tuple[int, ...]: '''simple docstring''' UpperCAmelCase_ = [] for d in reversed(snake_case_ ): idx.append(flat_idx % d ) UpperCAmelCase_ = flat_idx // d return tuple(reversed(snake_case_ ) ) @torch.jit.ignore def lowerCAmelCase_ ( snake_case_ : Sequence[int] , snake_case_ : Sequence[int] , snake_case_ : Sequence[int] , snake_case_ : Optional[Sequence[bool]] = None , snake_case_ : Optional[Sequence[bool]] = None , ) -> List[Tuple[slice, ...]]: '''simple docstring''' def reduce_edge_list(snake_case_ : List[bool] ) -> None: UpperCAmelCase_ = True for i in range(len(snake_case_ ) ): UpperCAmelCase_ = -1 * (i + 1) l[reversed_idx] &= tally UpperCAmelCase_ = l[reversed_idx] if start_edges is None: UpperCAmelCase_ = [s == 0 for s in start] reduce_edge_list(snake_case_ ) if end_edges is None: UpperCAmelCase_ = [e == (d - 1) for e, d in zip(snake_case_ , snake_case_ )] reduce_edge_list(snake_case_ ) # Base cases. Either start/end are empty and we're done, or the final, # one-dimensional tensor can be simply sliced if len(snake_case_ ) == 0: return [()] elif len(snake_case_ ) == 1: return [(slice(start[0] , end[0] + 1 ),)] UpperCAmelCase_ = [] UpperCAmelCase_ = [] # Dimensions common to start and end can be selected directly for s, e in zip(snake_case_ , snake_case_ ): if s == e: path_list.append(slice(snake_case_ , s + 1 ) ) else: break UpperCAmelCase_ = tuple(snake_case_ ) UpperCAmelCase_ = len(snake_case_ ) # start == end, and we're done if divergence_idx == len(snake_case_ ): return [path] def upper() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None UpperCAmelCase_ = start[divergence_idx] return tuple( path + (slice(snake_case_ , sdi + 1 ),) + s for s in _get_minimal_slice_set( start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) ) def lower() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None UpperCAmelCase_ = end[divergence_idx] return tuple( path + (slice(snake_case_ , edi + 1 ),) + s for s in _get_minimal_slice_set( [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) ) # If both start and end are at the edges of the subtree rooted at # divergence_idx, we can just select the whole subtree at once if start_edges[divergence_idx] and end_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) ) # If just start is at the edge, we can grab almost all 
of the subtree, # treating only the ragged bottom edge as an edge case elif start_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) ) slices.extend(lower() ) # Analogous to the previous case, but the top is ragged this time elif end_edges[divergence_idx]: slices.extend(upper() ) slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) ) # If both sides of the range are ragged, we need to handle both sides # separately. If there's contiguous meat in between them, we can index it # in one big chunk else: slices.extend(upper() ) UpperCAmelCase_ = end[divergence_idx] - start[divergence_idx] if middle_ground > 1: slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) ) slices.extend(lower() ) return slices @torch.jit.ignore def lowerCAmelCase_ ( snake_case_ : torch.Tensor , snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> torch.Tensor: '''simple docstring''' UpperCAmelCase_ = t.shape[:no_batch_dims] UpperCAmelCase_ = list(_flat_idx_to_idx(snake_case_ , snake_case_ ) ) # _get_minimal_slice_set is inclusive UpperCAmelCase_ = list(_flat_idx_to_idx(flat_end - 1 , snake_case_ ) ) # Get an ordered list of slices to perform UpperCAmelCase_ = _get_minimal_slice_set( snake_case_ , snake_case_ , snake_case_ , ) UpperCAmelCase_ = [t[s] for s in slices] return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] ) def lowerCAmelCase_ ( snake_case_ : Callable , snake_case_ : Dict[str, Any] , snake_case_ : int , snake_case_ : int , snake_case_ : bool = False , snake_case_ : Any = None , snake_case_ : bool = False , ) -> Any: '''simple docstring''' if not (len(snake_case_ ) > 0): raise ValueError("Must provide at least one input" ) UpperCAmelCase_ = [shape[:no_batch_dims] for shape in _fetch_dims(snake_case_ )] UpperCAmelCase_ = tuple([max(snake_case_ ) for s in zip(*snake_case_ )] ) def _prep_inputs(snake_case_ : torch.Tensor ) -> torch.Tensor: if not low_mem: if not sum(t.shape[:no_batch_dims] ) == no_batch_dims: UpperCAmelCase_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) UpperCAmelCase_ = t.reshape(-1 , *t.shape[no_batch_dims:] ) else: UpperCAmelCase_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) return t UpperCAmelCase_ = tensor_tree_map(_prep_inputs , snake_case_ ) UpperCAmelCase_ = None if _out is not None: UpperCAmelCase_ = tensor_tree_map(lambda snake_case_ : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out ) UpperCAmelCase_ = 1 for d in orig_batch_dims: flat_batch_dim *= d UpperCAmelCase_ = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) def _select_chunk(snake_case_ : torch.Tensor ) -> torch.Tensor: return t[i : i + chunk_size] if t.shape[0] != 1 else t UpperCAmelCase_ = 0 UpperCAmelCase_ = prepped_outputs for _ in range(snake_case_ ): # Chunk the input if not low_mem: UpperCAmelCase_ = _select_chunk else: UpperCAmelCase_ = partial( _chunk_slice , flat_start=snake_case_ , flat_end=min(snake_case_ , i + chunk_size ) , no_batch_dims=len(snake_case_ ) , ) UpperCAmelCase_ = tensor_tree_map(snake_case_ , snake_case_ ) # Run the layer on the chunk UpperCAmelCase_ = layer(**snake_case_ ) # Allocate space for the output if out is None: UpperCAmelCase_ = tensor_tree_map(lambda snake_case_ : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , snake_case_ ) # Put the chunk in its pre-allocated space if isinstance(snake_case_ , snake_case_ ): def assign(snake_case_ : dict , snake_case_ : dict ) -> None: for k, v in da.items(): if 
isinstance(snake_case_ , snake_case_ ): assign(snake_case_ , da[k] ) else: if _add_into_out: v[i : i + chunk_size] += da[k] else: UpperCAmelCase_ = da[k] assign(snake_case_ , snake_case_ ) elif isinstance(snake_case_ , snake_case_ ): for xa, xa in zip(snake_case_ , snake_case_ ): if _add_into_out: xa[i : i + chunk_size] += xa else: UpperCAmelCase_ = xa elif isinstance(snake_case_ , torch.Tensor ): if _add_into_out: out[i : i + chunk_size] += output_chunk else: UpperCAmelCase_ = output_chunk else: raise ValueError("Not supported" ) i += chunk_size UpperCAmelCase_ = tensor_tree_map(lambda snake_case_ : t.view(orig_batch_dims + t.shape[1:] ) , snake_case_ ) return out class __A : def __init__(self : Dict , __a : int = 512 , ): UpperCAmelCase_ = max_chunk_size UpperCAmelCase_ = None UpperCAmelCase_ = None def _lowercase (self : List[Any] , __a : Callable , __a : tuple , __a : int ): logging.info("Tuning chunk size..." ) if min_chunk_size >= self.max_chunk_size: return min_chunk_size UpperCAmelCase_ = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )] UpperCAmelCase_ = [c for c in candidates if c > min_chunk_size] UpperCAmelCase_ = [min_chunk_size] + candidates candidates[-1] += 4 def test_chunk_size(__a : int ) -> bool: try: with torch.no_grad(): fn(*__a , chunk_size=__a ) return True except RuntimeError: return False UpperCAmelCase_ = 0 UpperCAmelCase_ = len(__a ) - 1 while i > min_viable_chunk_size_index: UpperCAmelCase_ = test_chunk_size(candidates[i] ) if not viable: UpperCAmelCase_ = (min_viable_chunk_size_index + i) // 2 else: UpperCAmelCase_ = i UpperCAmelCase_ = (i + len(__a ) - 1) // 2 return candidates[min_viable_chunk_size_index] def _lowercase (self : int , __a : Iterable , __a : Iterable ): UpperCAmelCase_ = True for aa, aa in zip(__a , __a ): assert type(__a ) == type(__a ) if isinstance(__a , (list, tuple) ): consistent &= self._compare_arg_caches(__a , __a ) elif isinstance(__a , __a ): UpperCAmelCase_ = [v for _, v in sorted(aa.items() , key=lambda __a : x[0] )] UpperCAmelCase_ = [v for _, v in sorted(aa.items() , key=lambda __a : x[0] )] consistent &= self._compare_arg_caches(__a , __a ) else: consistent &= aa == aa return consistent def _lowercase (self : List[str] , __a : Callable , __a : tuple , __a : int , ): UpperCAmelCase_ = True UpperCAmelCase_ = tree_map(lambda __a : a.shape if isinstance(__a , torch.Tensor ) else a , __a , __a ) if self.cached_arg_data is not None: # If args have changed shape/value, we need to re-tune assert len(self.cached_arg_data ) == len(__a ) UpperCAmelCase_ = self._compare_arg_caches(self.cached_arg_data , __a ) else: # Otherwise, we can reuse the precomputed value UpperCAmelCase_ = False if not consistent: UpperCAmelCase_ = self._determine_favorable_chunk_size( __a , __a , __a , ) UpperCAmelCase_ = arg_data assert self.cached_chunk_size is not None return self.cached_chunk_size
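The slicing machinery above repeatedly converts a flat batch index back into a multi-dimensional one. A pure-Python sketch of that conversion (equivalent to `numpy.unravel_index`):

def flat_to_idx(flat_idx: int, dims: tuple) -> tuple:
    # peel off the fastest-varying dimension first, then reverse
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx //= d
    return tuple(reversed(idx))

assert flat_to_idx(7, (2, 4)) == (1, 3)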
78
'''simple docstring'''
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    '''simple docstring'''
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 2_00:
        msg = (
            "Request to slack returned an error "
            f"""{response.status_code}, the response is:\n{response.text}"""
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
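A hedged offline sketch: the helper can be exercised without a real webhook by monkeypatching `requests.post` (the URL is a placeholder, and the snippet assumes the function above is in scope as `send_slack_message`):

import requests

class _FakeResponse:
    status_code = 2_00
    text = "ok"

def _fake_post(url, json=None, headers=None):
    # the helper posts {"text": <message_body>} as JSON
    assert json == {"text": "hello"}
    return _FakeResponse()

requests.post = _fake_post  # monkeypatch for the demo only
send_slack_message("hello", "https://hooks.slack.com/services/T0/B0/XXX")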
78
1
'''simple docstring'''
from __future__ import annotations

seive = [True] * 1_00_00_01
i = 2
while i * i <= 1_00_00_00:
    if seive[i]:
        for j in range(i * i, 1_00_00_01, i):
            seive[j] = False
    i += 1


def is_prime(n: int) -> bool:
    '''simple docstring'''
    return seive[n]


def contains_an_even_digit(n: int) -> bool:
    '''simple docstring'''
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1_00_00_00) -> list[int]:
    '''simple docstring'''
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def count_circular_primes() -> int:
    '''simple docstring'''
    return len(find_circular_primes())


if __name__ == "__main__":
    print(f"{len(find_circular_primes()) = }")
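The circularity test hinges on the digit-rotation list comprehension above; a quick check on 197, a known circular prime:

str_num = "197"
rotations = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
assert rotations == [197, 971, 719]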
78
'''simple docstring''' from typing import Callable, List, Optional, Union import PIL import torch from transformers import ( CLIPImageProcessor, CLIPSegForImageSegmentation, CLIPSegProcessor, CLIPTextModel, CLIPTokenizer, ) from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import deprecate, is_accelerate_available, logging SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__) # pylint: disable=invalid-name class __A ( UpperCamelCase__ ): def __init__(self : Any , __a : CLIPSegForImageSegmentation , __a : CLIPSegProcessor , __a : AutoencoderKL , __a : CLIPTextModel , __a : CLIPTokenizer , __a : UNetaDConditionModel , __a : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __a : StableDiffusionSafetyChecker , __a : CLIPImageProcessor , ): super().__init__() if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1: UpperCAmelCase_ = ( f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`""" f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """ "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1" , "1.0.0" , __a , standard_warn=__a ) UpperCAmelCase_ = dict(scheduler.config ) UpperCAmelCase_ = 1 UpperCAmelCase_ = FrozenDict(__a ) if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False: UpperCAmelCase_ = ( f"""The configuration file of this scheduler: {scheduler} has not set the configuration""" " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make" " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to" " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face" " Hub, it would be very nice if you could open a Pull request for the" " `scheduler/scheduler_config.json` file" ) deprecate("skip_prk_steps not set" , "1.0.0" , __a , standard_warn=__a ) UpperCAmelCase_ = dict(scheduler.config ) UpperCAmelCase_ = True UpperCAmelCase_ = FrozenDict(__a ) if safety_checker is None: logger.warning( f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure""" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
) self.register_modules( segmentation_model=__a , segmentation_processor=__a , vae=__a , text_encoder=__a , tokenizer=__a , unet=__a , scheduler=__a , safety_checker=__a , feature_extractor=__a , ) def _lowercase (self : str , __a : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCAmelCase_ = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__a ) def _lowercase (self : int ): self.enable_attention_slicing(__a ) def _lowercase (self : Optional[Any] ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) UpperCAmelCase_ = torch.device("cuda" ) for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]: if cpu_offloaded_model is not None: cpu_offload(__a , __a ) @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def _lowercase (self : Optional[int] ): if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(__a , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() def __call__(self : Dict , __a : Union[str, List[str]] , __a : Union[torch.FloatTensor, PIL.Image.Image] , __a : str , __a : int = 512 , __a : int = 512 , __a : int = 50 , __a : float = 7.5 , __a : Optional[Union[str, List[str]]] = None , __a : Optional[int] = 1 , __a : float = 0.0 , __a : Optional[torch.Generator] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[str] = "pil" , __a : bool = True , __a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __a : int = 1 , **__a : int , ): UpperCAmelCase_ = self.segmentation_processor( text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device ) UpperCAmelCase_ = self.segmentation_model(**__a ) UpperCAmelCase_ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy() UpperCAmelCase_ = self.numpy_to_pil(__a )[0].resize(image.size ) # Run inpainting pipeline with the generated mask UpperCAmelCase_ = StableDiffusionInpaintPipeline( vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , ) return inpainting_pipeline( prompt=__a , image=__a , mask_image=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , )
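A hedged sketch of the mask step the pipeline performs before delegating to inpainting: CLIPSeg logits are squashed with a sigmoid, converted to a grayscale image, and resized to the input resolution (the tensor and sizes below are stand-ins, not real model output):

import numpy as np
import PIL.Image
import torch

logits = torch.randn(352, 352)        # stand-in for CLIPSeg segmentation logits
mask = torch.sigmoid(logits).numpy()  # values in (0, 1)
mask_image = PIL.Image.fromarray((mask * 255).astype(np.uint8)).resize((512, 512))
assert mask_image.size == (512, 512)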
78
1
'''simple docstring'''


def bubble_sort(list_data: list, length: int = 0) -> list:
    '''simple docstring'''
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
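A quick check of the recursive variant above, including the early-exit path when no swap occurs:

assert bubble_sort([5, 1, 4, 2, 8]) == [1, 2, 4, 5, 8]
assert bubble_sort([]) == []
assert bubble_sort([1, 2, 3]) == [1, 2, 3]  # already sorted: returns after one pass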
78
'''simple docstring'''


def is_power_of_two(number: int) -> bool:
    '''simple docstring'''
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
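A quick check of the bit trick: `n & (n - 1)` clears the lowest set bit, so the expression is zero exactly for powers of two (and, by this implementation's convention, for zero):

assert [n for n in range(1, 20) if is_power_of_two(n)] == [1, 2, 4, 8, 16]
assert is_power_of_two(0)  # 0 & -1 == 0, so zero is accepted here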
78
1
'''simple docstring''' import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE_: Dict =logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case_ : str ) -> int: '''simple docstring''' UpperCAmelCase_ = SwinConfig.from_pretrained( "microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] ) UpperCAmelCase_ = MaskFormerConfig(backbone_config=snake_case_ ) UpperCAmelCase_ = "huggingface/label-files" if "ade20k-full" in model_name: # this should be ok UpperCAmelCase_ = 8_47 UpperCAmelCase_ = "maskformer-ade20k-full-id2label.json" elif "ade" in model_name: # this should be ok UpperCAmelCase_ = 1_50 UpperCAmelCase_ = "ade20k-id2label.json" elif "coco-stuff" in model_name: # this should be ok UpperCAmelCase_ = 1_71 UpperCAmelCase_ = "maskformer-coco-stuff-id2label.json" elif "coco" in model_name: # TODO UpperCAmelCase_ = 1_33 UpperCAmelCase_ = "coco-panoptic-id2label.json" elif "cityscapes" in model_name: # this should be ok UpperCAmelCase_ = 19 UpperCAmelCase_ = "cityscapes-id2label.json" elif "vistas" in model_name: # this should be ok UpperCAmelCase_ = 65 UpperCAmelCase_ = "mapillary-vistas-id2label.json" UpperCAmelCase_ = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="dataset" ) , "r" ) ) UpperCAmelCase_ = {int(snake_case_ ): v for k, v in idalabel.items()} return config def lowerCAmelCase_ ( snake_case_ : List[str] ) -> List[Any]: '''simple docstring''' UpperCAmelCase_ = [] # stem # fmt: off rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") ) rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") ) rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") ) rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") ) rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") ) rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") ) rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") ) rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") ) rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", 
f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") ) rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") ) rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") ) rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") ) rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") ) rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") ) rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") ) if i < 3: rename_keys.append((f"""backbone.layers.{i}.downsample.reduction.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") ) rename_keys.append((f"""backbone.layers.{i}.downsample.norm.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") ) rename_keys.append((f"""backbone.layers.{i}.downsample.norm.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") ) rename_keys.append((f"""backbone.norm{i}.weight""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") ) rename_keys.append((f"""backbone.norm{i}.bias""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") ) # FPN rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") ) rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") ) rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((f"""sem_seg_head.adapter_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") ) rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") ) rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") ) rename_keys.append((f"""sem_seg_head.layer_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") ) rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") ) rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") ) rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") ) rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # 
self-attention out projection rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") ) rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") ) # cross-attention out projection rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") ) rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") ) # MLP 1 rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") ) rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") ) # MLP 2 rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") ) rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") ) # layernorm 1 (self-attention layernorm) rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") ) rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") ) # layernorm 2 (cross-attention layernorm) rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") ) rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") ) # layernorm 3 (final layernorm) rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") ) rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") ) rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") ) rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") ) # heads on top rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") ) rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") ) rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") ) rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") ) rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") ) for i in range(3 ): 
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", f"""mask_embedder.{i}.0.weight""") ) rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", f"""mask_embedder.{i}.0.bias""") ) # fmt: on return rename_keys def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Dict , snake_case_ : Any ) -> Any: '''simple docstring''' UpperCAmelCase_ = dct.pop(snake_case_ ) UpperCAmelCase_ = val def lowerCAmelCase_ ( snake_case_ : List[str] , snake_case_ : List[str] ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): UpperCAmelCase_ = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) UpperCAmelCase_ = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" ) UpperCAmelCase_ = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ = in_proj_weight[:dim, :] UpperCAmelCase_ = in_proj_bias[: dim] UpperCAmelCase_ = in_proj_weight[ dim : dim * 2, : ] UpperCAmelCase_ = in_proj_bias[ dim : dim * 2 ] UpperCAmelCase_ = in_proj_weight[ -dim :, : ] UpperCAmelCase_ = in_proj_bias[-dim :] # fmt: on def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : str ) -> str: '''simple docstring''' UpperCAmelCase_ = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) UpperCAmelCase_ = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" ) UpperCAmelCase_ = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ = in_proj_weight[: hidden_size, :] UpperCAmelCase_ = in_proj_bias[:config.hidden_size] UpperCAmelCase_ = in_proj_weight[hidden_size : hidden_size * 2, :] UpperCAmelCase_ = in_proj_bias[hidden_size : hidden_size * 2] UpperCAmelCase_ = in_proj_weight[-hidden_size :, :] UpperCAmelCase_ = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) UpperCAmelCase_ = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" ) UpperCAmelCase_ = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ = in_proj_weight[: hidden_size, :] UpperCAmelCase_ = in_proj_bias[:config.hidden_size] UpperCAmelCase_ = in_proj_weight[hidden_size : hidden_size * 2, :] UpperCAmelCase_ = in_proj_bias[hidden_size : hidden_size * 2] UpperCAmelCase_ = in_proj_weight[-hidden_size :, :] UpperCAmelCase_ = in_proj_bias[-hidden_size :] # fmt: on def lowerCAmelCase_ ( ) -> torch.Tensor: '''simple docstring''' UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : str , snake_case_ : str , snake_case_ : bool = False ) 
-> List[Any]: '''simple docstring''' UpperCAmelCase_ = get_maskformer_config(snake_case_ ) # load original state_dict with open(snake_case_ , "rb" ) as f: UpperCAmelCase_ = pickle.load(snake_case_ ) UpperCAmelCase_ = data["model"] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys UpperCAmelCase_ = create_rename_keys(snake_case_ ) for src, dest in rename_keys: rename_key(snake_case_ , snake_case_ , snake_case_ ) read_in_swin_q_k_v(snake_case_ , config.backbone_config ) read_in_decoder_q_k_v(snake_case_ , snake_case_ ) # update to torch tensors for key, value in state_dict.items(): UpperCAmelCase_ = torch.from_numpy(snake_case_ ) # load 🤗 model UpperCAmelCase_ = MaskFormerForInstanceSegmentation(snake_case_ ) model.eval() for name, param in model.named_parameters(): print(snake_case_ , param.shape ) UpperCAmelCase_ , UpperCAmelCase_ = model.load_state_dict(snake_case_ , strict=snake_case_ ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(snake_case_ ) == 0, f"""Unexpected keys: {unexpected_keys}""" # verify results UpperCAmelCase_ = prepare_img() if "vistas" in model_name: UpperCAmelCase_ = 65 elif "cityscapes" in model_name: UpperCAmelCase_ = 6_55_35 else: UpperCAmelCase_ = 2_55 UpperCAmelCase_ = True if "ade" in model_name else False UpperCAmelCase_ = MaskFormerImageProcessor(ignore_index=snake_case_ , reduce_labels=snake_case_ ) UpperCAmelCase_ = image_processor(snake_case_ , return_tensors="pt" ) UpperCAmelCase_ = model(**snake_case_ ) print("Logits:" , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": UpperCAmelCase_ = torch.tensor( [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=1E-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(f"""Saving model and image processor to {pytorch_dump_folder_path}""" ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) model.save_pretrained(snake_case_ ) image_processor.save_pretrained(snake_case_ ) if push_to_hub: print("Pushing model and image processor to the hub..." ) model.push_to_hub(f"""nielsr/{model_name}""" ) image_processor.push_to_hub(f"""nielsr/{model_name}""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_: List[Any] =argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='maskformer-swin-tiny-ade', type=str, help=('Name of the MaskFormer model you\'d like to convert',), ) parser.add_argument( '--checkpoint_path', default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl', type=str, help='Path to the original state dict (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) SCREAMING_SNAKE_CASE_: Tuple =parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
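For reference, a minimal sketch of driving the converter above from Python rather than via the CLI; the checkpoint path is a placeholder and must point at a locally downloaded original pickle file.

# Hypothetical programmatic invocation of the converter above. Arguments are
# passed positionally because the signature's parameter names are placeholders;
# "/path/to/model.pkl" must be replaced with a real original checkpoint.
convert_maskformer_checkpoint("maskformer-swin-tiny-ade", "/path/to/model.pkl", "converted", False)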
'''simple docstring'''
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class __A :
    a__ : int
    a__ : TreeNode | None = None
    a__ : TreeNode | None = None


SCREAMING_SNAKE_CASE_: Union[str, Any] =namedtuple('CoinsDistribResult', 'moves excess')


def lowerCAmelCase_ ( snake_case_ : TreeNode | None ) -> int:
    '''simple docstring'''
    if root is None:
        return 0

    # Validation
    def count_nodes(snake_case_ : TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1

    def count_coins(snake_case_ : TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data

    if count_nodes(snake_case_ ) != count_coins(snake_case_ ):
        raise ValueError("The nodes number should be same as the number of coins" )

    # Main calculation
    def get_distrib(snake_case_ : TreeNode | None ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )

        UpperCAmelCase_ , UpperCAmelCase_ = get_distrib(node.left )
        UpperCAmelCase_ , UpperCAmelCase_ = get_distrib(node.right )

        UpperCAmelCase_ = 1 - left_distrib_excess
        UpperCAmelCase_ = 1 - right_distrib_excess

        UpperCAmelCase_ = (
            left_distrib_moves
            + right_distrib_moves
            + abs(snake_case_ )
            + abs(snake_case_ )
        )
        UpperCAmelCase_ = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(snake_case_ , snake_case_ )

    return get_distrib(snake_case_ )[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
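A minimal usage sketch of the coin-distribution routine above, assuming the dataclass corresponds to the usual TreeNode(data, left, right) shape that the traversal accesses as .data, .left and .right, and that the parameter placeholders are bound to the names the function bodies reference.

# Hypothetical usage; TreeNode here is a stand-in that matches the field names
# the traversal code above indexes into.
from dataclasses import dataclass

@dataclass
class TreeNode:
    data: int
    left: "TreeNode | None" = None
    right: "TreeNode | None" = None

root = TreeNode(3, TreeNode(0), TreeNode(0))  # 3 coins at the root, none below
# moving one coin to each child costs two moves in total
assert lowerCAmelCase_(root) == 2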
'''simple docstring'''
import argparse

import torch

from transformers import BertForMaskedLM


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE_: List[str] =argparse.ArgumentParser(
        description=(
            'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
            ' Distillation'
        )
    )
    parser.add_argument('--model_type', default='bert', choices=['bert'])
    parser.add_argument('--model_name', default='bert-base-uncased', type=str)
    parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
    parser.add_argument('--vocab_transform', action='store_true')
    SCREAMING_SNAKE_CASE_: Dict =parser.parse_args()

    if args.model_type == "bert":
        SCREAMING_SNAKE_CASE_: Optional[Any] =BertForMaskedLM.from_pretrained(args.model_name)
        SCREAMING_SNAKE_CASE_: str ='bert'
    else:
        raise ValueError('args.model_type should be "bert".')

    SCREAMING_SNAKE_CASE_: Any =model.state_dict()
    SCREAMING_SNAKE_CASE_: int ={}

    for w in ["word_embeddings", "position_embeddings"]:
        SCREAMING_SNAKE_CASE_: Union[str, Any] =state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        SCREAMING_SNAKE_CASE_: List[str] =state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    SCREAMING_SNAKE_CASE_: str =0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            SCREAMING_SNAKE_CASE_: Optional[int] =state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            SCREAMING_SNAKE_CASE_: Dict =state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            SCREAMING_SNAKE_CASE_: Union[str, Any] =state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            SCREAMING_SNAKE_CASE_: str =state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            SCREAMING_SNAKE_CASE_: List[str] =state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            SCREAMING_SNAKE_CASE_: Any =state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            SCREAMING_SNAKE_CASE_: Tuple =state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            SCREAMING_SNAKE_CASE_: Optional[Any] =state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    SCREAMING_SNAKE_CASE_: Any =state_dict['cls.predictions.decoder.weight']
    SCREAMING_SNAKE_CASE_: Union[str, Any] =state_dict['cls.predictions.bias']
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            SCREAMING_SNAKE_CASE_: str =state_dict[f"cls.predictions.transform.dense.{w}"]
            SCREAMING_SNAKE_CASE_: Optional[int] =state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
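A hedged follow-up sketch: the dumped state dict above is meant to warm-start a student model. The path must match --dump_checkpoint, and strict=False is used because some keys may not line up one-to-one.

# Assumption: the extracted checkpoint initializes a DistilBERT student.
import torch
from transformers import DistilBertConfig, DistilBertForMaskedLM

state = torch.load("serialization_dir/tf_bert-base-uncased_0247911.pth", map_location="cpu")
student = DistilBertForMaskedLM(DistilBertConfig())
student.load_state_dict(state, strict=False)  # strict=False: head/key names may differ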
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock

import torch

from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available


logging.basicConfig(level=logging.DEBUG)

SCREAMING_SNAKE_CASE_: int =logging.getLogger()


def lowerCAmelCase_ ( ) -> Dict:
    '''simple docstring'''
    UpperCAmelCase_ = argparse.ArgumentParser()
    parser.add_argument("-f" )
    UpperCAmelCase_ = parser.parse_args()
    return args.f


def lowerCAmelCase_ ( snake_case_ : List[Any] ) -> str:
    '''simple docstring'''
    UpperCAmelCase_ = {}
    UpperCAmelCase_ = os.path.join(snake_case_ , "all_results.json" )
    if os.path.exists(snake_case_ ):
        with open(snake_case_ , "r" ) as f:
            UpperCAmelCase_ = json.load(snake_case_ )
    else:
        raise ValueError(f"""can't find {path}""" )
    return results


def lowerCAmelCase_ ( ) -> Dict:
    '''simple docstring'''
    UpperCAmelCase_ = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


SCREAMING_SNAKE_CASE_: Any =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class __A ( UpperCamelCase__ ):
    @classmethod
    def _lowercase (cls : Any ):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        UpperCAmelCase_ = tempfile.mkdtemp()
        UpperCAmelCase_ = os.path.join(cls.tmpdir , "default_config.yml" )
        write_basic_config(save_location=cls.configPath )
        UpperCAmelCase_ = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def _lowercase (cls : int ):
        shutil.rmtree(cls.tmpdir )

    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def _lowercase (self : Union[str, Any] ):
        UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
        UpperCAmelCase_ = f"""
            {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --seed=42
            --checkpointing_steps epoch
            --with_tracking
        """.split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16" )

        run_command(self._launch_args + testargs )
        UpperCAmelCase_ = get_results(__a )
        self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
        self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(__a , "glue_no_trainer" ) ) )

    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def _lowercase (self : Optional[Any] ):
        UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
        UpperCAmelCase_ = f"""
            {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return

        run_command(self._launch_args + testargs )
        UpperCAmelCase_ = get_results(__a )
        self.assertLess(result["perplexity"] , 100 )
        self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(__a , "clm_no_trainer" ) ) )

    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def _lowercase (self : Union[str, Any] ):
        UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
        UpperCAmelCase_ = f"""
            {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --num_train_epochs=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()
        run_command(self._launch_args + testargs )
        UpperCAmelCase_ = get_results(__a )
        self.assertLess(result["perplexity"] , 42 )
        self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(__a , "mlm_no_trainer" ) ) )

    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def _lowercase (self : Optional[Any] ):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        UpperCAmelCase_ = 7 if get_gpu_count() > 1 else 2
        UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
        UpperCAmelCase_ = f"""
            {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            --checkpointing_steps epoch
            --with_tracking
        """.split()
        run_command(self._launch_args + testargs )
        UpperCAmelCase_ = get_results(__a )
        self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
        self.assertLess(result["train_loss"] , 0.5 )
        self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(__a , "ner_no_trainer" ) ) )

    @unittest.skip(reason="Fix me @muellerzr" )
    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def _lowercase (self : int ):
        UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
        UpperCAmelCase_ = f"""
            {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --seed=42
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()
        run_command(self._launch_args + testargs )
        UpperCAmelCase_ = get_results(__a )
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"] , 28 )
        self.assertGreaterEqual(result["eval_exact"] , 28 )
        self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(__a , "qa_no_trainer" ) ) )

    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def _lowercase (self : str ):
        UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
        UpperCAmelCase_ = f"""
            {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/swag/sample.json
            --validation_file tests/fixtures/tests_samples/swag/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=20
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --with_tracking
        """.split()
        run_command(self._launch_args + testargs )
        UpperCAmelCase_ = get_results(__a )
        self.assertGreaterEqual(result["eval_accuracy"] , 0.8 )
        self.assertTrue(os.path.exists(os.path.join(__a , "swag_no_trainer" ) ) )

    @slow
    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def _lowercase (self : Optional[int] ):
        UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
        UpperCAmelCase_ = f"""
            {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()
        run_command(self._launch_args + testargs )
        UpperCAmelCase_ = get_results(__a )
        self.assertGreaterEqual(result["eval_rouge1"] , 10 )
        self.assertGreaterEqual(result["eval_rouge2"] , 2 )
        self.assertGreaterEqual(result["eval_rougeL"] , 7 )
        self.assertGreaterEqual(result["eval_rougeLsum"] , 7 )
        self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(__a , "summarization_no_trainer" ) ) )

    @slow
    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def _lowercase (self : List[str] ):
        UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
        UpperCAmelCase_ = f"""
            {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
            --model_name_or_path sshleifer/student_marian_en_ro_6_1
            --source_lang en
            --target_lang ro
            --train_file tests/fixtures/tests_samples/wmt16/sample.json
            --validation_file tests/fixtures/tests_samples/wmt16/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --num_beams=6
            --learning_rate=3e-3
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --source_lang en_XX
            --target_lang ro_RO
            --checkpointing_steps epoch
            --with_tracking
        """.split()
        run_command(self._launch_args + testargs )
        UpperCAmelCase_ = get_results(__a )
        self.assertGreaterEqual(result["eval_bleu"] , 30 )
        self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(__a , "translation_no_trainer" ) ) )

    @slow
    def _lowercase (self : Dict ):
        UpperCAmelCase_ = logging.StreamHandler(sys.stdout )
        logger.addHandler(__a )
        UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
        UpperCAmelCase_ = f"""
            {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
            --dataset_name huggingface/semantic-segmentation-test-sample
            --output_dir {tmp_dir}
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
        """.split()
        run_command(self._launch_args + testargs )
        UpperCAmelCase_ = get_results(__a )
        self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 )

    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def _lowercase (self : Any ):
        UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
        UpperCAmelCase_ = f"""
            {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
            --model_name_or_path google/vit-base-patch16-224-in21k
            --dataset_name hf-internal-testing/cats_vs_dogs_sample
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --max_train_steps 2
            --train_val_split 0.1
            --seed 42
            --output_dir {tmp_dir}
            --with_tracking
            --checkpointing_steps 1
        """.split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16" )
        run_command(self._launch_args + testargs )
        UpperCAmelCase_ = get_results(__a )
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"] , 0.6 )
        self.assertTrue(os.path.exists(os.path.join(__a , "step_1" ) ) )
        self.assertTrue(os.path.exists(os.path.join(__a , "image_classification_no_trainer" ) ) )
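For orientation, an illustrative sketch of the command each test ultimately runs: the accelerate launcher built in setUpClass, followed by one example script and its flags. Paths here are placeholders.

# Illustrative only; equivalent to self._launch_args + testargs in the tests above.
import subprocess

cmd = [
    "accelerate", "launch", "--config_file", "default_config.yml",  # placeholder config path
    "examples/pytorch/text-classification/run_glue_no_trainer.py",
    "--seed=42",
]
subprocess.run(cmd, check=True)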
'''simple docstring'''
def lowerCAmelCase_ ( snake_case_ : Optional[Any]=2_81_23 ) -> str:
    '''simple docstring'''
    UpperCAmelCase_ = [1] * (limit + 1)

    for i in range(2 , int(limit**0.5 ) + 1 ):
        sum_divs[i * i] += i
        for k in range(i + 1 , limit // i + 1 ):
            sum_divs[k * i] += k + i

    UpperCAmelCase_ = set()
    UpperCAmelCase_ = 0
    for n in range(1 , limit + 1 ):
        if sum_divs[n] > n:
            abundants.add(snake_case_ )
        if not any((n - a in abundants) for a in abundants ):
            res += n

    return res


if __name__ == "__main__":
    print(solution())
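As a sanity check, and assuming the list, set and counter above are bound to the names the loop bodies reference (sum_divs, abundants, res), the default call should reproduce the published answer to Project Euler problem 23:

# Hypothetical check against the published Project Euler 23 result.
assert lowerCAmelCase_() == 4179871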
'''simple docstring'''
import builtins
import sys

from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP


SCREAMING_SNAKE_CASE_: Any =False

try:
    SCREAMING_SNAKE_CASE_: Optional[Any] =_is_package_available('google.colab')
except ModuleNotFoundError:
    pass


@input.register
class __A :
    def __init__(self : int , __a : str = None , __a : list = [] ):
        UpperCAmelCase_ = 0
        UpperCAmelCase_ = choices
        UpperCAmelCase_ = prompt
        if sys.platform == "win32":
            UpperCAmelCase_ = "*"
        else:
            UpperCAmelCase_ = "➔ "

    def _lowercase (self : Union[str, Any] , __a : Optional[int] , __a : str = "" ):
        if sys.platform != "win32":
            writeColor(self.choices[index] , 32 , __a )
        else:
            forceWrite(self.choices[index] , __a )

    def _lowercase (self : Any , __a : int ):
        if index == self.position:
            forceWrite(f""" {self.arrow_char} """ )
            self.write_choice(__a )
        else:
            forceWrite(f""" {self.choices[index]}""" )
        reset_cursor()

    def _lowercase (self : Optional[Any] , __a : Direction , __a : int = 1 ):
        UpperCAmelCase_ = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices ):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(__a )
        move_cursor(__a , direction.name )
        self.print_choice(self.position )

    @input.mark(KEYMAP["up"] )
    def _lowercase (self : Dict ):
        self.move_direction(Direction.UP )

    @input.mark(KEYMAP["down"] )
    def _lowercase (self : Any ):
        self.move_direction(Direction.DOWN )

    @input.mark(KEYMAP["newline"] )
    def _lowercase (self : Optional[Any] ):
        move_cursor(len(self.choices ) - self.position , "DOWN" )
        return self.position

    @input.mark(KEYMAP["interrupt"] )
    def _lowercase (self : str ):
        move_cursor(len(self.choices ) - self.position , "DOWN" )
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(__a )] for number in range(10 )] )
    def _lowercase (self : Union[str, Any] ):
        UpperCAmelCase_ = int(chr(self.current_selection ) )
        UpperCAmelCase_ = index - self.position
        if index == self.position:
            return
        if index < len(self.choices ):
            if self.position > index:
                self.move_direction(Direction.UP , -movement )
            elif self.position < index:
                self.move_direction(Direction.DOWN , __a )
            else:
                return
        else:
            return

    def _lowercase (self : Optional[Any] , __a : int = 0 ):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt , "\n" )
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
        UpperCAmelCase_ = default_choice
        for i in range(len(self.choices ) ):
            self.print_choice(__a )
            forceWrite("\n" )
        move_cursor(len(self.choices ) - self.position , "UP" )
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        UpperCAmelCase_ = int(builtins.input() )
                    except ValueError:
                        UpperCAmelCase_ = default_choice
                else:
                    UpperCAmelCase_ = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices ) + 1 ):
                        move_cursor(1 , "UP" )
                        clear_line()
                    self.write_choice(__a , "\n" )
                    return choice
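In the upstream accelerate source this class is BulletMenu and its entry point is run(); a minimal interactive sketch under that assumption (it needs a real terminal to render):

# Hypothetical usage, assuming the upstream names BulletMenu / run().
from accelerate.commands.menu.selection_menu import BulletMenu

menu = BulletMenu("Which framework do you want to use?", ["pytorch", "tensorflow", "jax"])
choice = menu.run(default_choice=0)  # blocks until the user confirms with Enter
print(f"Selected option index: {choice}")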
'''simple docstring'''
from typing import Optional, Tuple, Union

import torch
from einops import rearrange, reduce

from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput


SCREAMING_SNAKE_CASE_: Optional[int] =8


def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : List[Any]=BITS ) -> Tuple:
    '''simple docstring'''
    UpperCAmelCase_ = x.device

    UpperCAmelCase_ = (x * 2_55).int().clamp(0 , 2_55 )

    UpperCAmelCase_ = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case_ )
    UpperCAmelCase_ = rearrange(snake_case_ , "d -> d 1 1" )
    UpperCAmelCase_ = rearrange(snake_case_ , "b c h w -> b c 1 h w" )

    UpperCAmelCase_ = ((x & mask) != 0).float()
    UpperCAmelCase_ = rearrange(snake_case_ , "b c d h w -> b (c d) h w" )
    UpperCAmelCase_ = bits * 2 - 1
    return bits


def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : str=BITS ) -> Union[str, Any]:
    '''simple docstring'''
    UpperCAmelCase_ = x.device

    UpperCAmelCase_ = (x > 0).int()
    UpperCAmelCase_ = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case_ , dtype=torch.intaa )

    UpperCAmelCase_ = rearrange(snake_case_ , "d -> d 1 1" )
    UpperCAmelCase_ = rearrange(snake_case_ , "b (c d) h w -> b c d h w" , d=8 )
    UpperCAmelCase_ = reduce(x * mask , "b c d h w -> b c h w" , "sum" )
    return (dec / 2_55).clamp(0.0 , 1.0 )


def lowerCAmelCase_ ( self : Optional[int] , snake_case_ : torch.FloatTensor , snake_case_ : int , snake_case_ : torch.FloatTensor , snake_case_ : float = 0.0 , snake_case_ : bool = True , snake_case_ : Optional[int]=None , snake_case_ : bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
    '''simple docstring'''
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    UpperCAmelCase_ = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    UpperCAmelCase_ = self.alphas_cumprod[timestep]
    UpperCAmelCase_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    UpperCAmelCase_ = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    UpperCAmelCase_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    UpperCAmelCase_ = self.bit_scale
    if self.config.clip_sample:
        UpperCAmelCase_ = torch.clamp(snake_case_ , -scale , snake_case_ )

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    UpperCAmelCase_ = self._get_variance(snake_case_ , snake_case_ )

    UpperCAmelCase_ = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        UpperCAmelCase_ = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    UpperCAmelCase_ = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    UpperCAmelCase_ = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        UpperCAmelCase_ = model_output.device if torch.is_tensor(snake_case_ ) else "cpu"
        UpperCAmelCase_ = torch.randn(model_output.shape , dtype=model_output.dtype , generator=snake_case_ ).to(snake_case_ )
        UpperCAmelCase_ = self._get_variance(snake_case_ , snake_case_ ) ** 0.5 * eta * noise

        UpperCAmelCase_ = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=snake_case_ , pred_original_sample=snake_case_ )


def lowerCAmelCase_ ( self : Union[str, Any] , snake_case_ : torch.FloatTensor , snake_case_ : int , snake_case_ : torch.FloatTensor , snake_case_ : List[Any]="epsilon" , snake_case_ : List[str]=None , snake_case_ : bool = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
    '''simple docstring'''
    UpperCAmelCase_ = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        UpperCAmelCase_ , UpperCAmelCase_ = torch.split(snake_case_ , sample.shape[1] , dim=1 )
    else:
        UpperCAmelCase_ = None

    # 1. compute alphas, betas
    UpperCAmelCase_ = self.alphas_cumprod[t]
    UpperCAmelCase_ = self.alphas_cumprod[t - 1] if t > 0 else self.one
    UpperCAmelCase_ = 1 - alpha_prod_t
    UpperCAmelCase_ = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        UpperCAmelCase_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        UpperCAmelCase_ = model_output
    else:
        raise ValueError(f"""Unsupported prediction_type {prediction_type}.""" )

    # 3. Clip "predicted x_0"
    UpperCAmelCase_ = self.bit_scale
    if self.config.clip_sample:
        UpperCAmelCase_ = torch.clamp(snake_case_ , -scale , snake_case_ )

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    UpperCAmelCase_ = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    UpperCAmelCase_ = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    UpperCAmelCase_ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    UpperCAmelCase_ = 0
    if t > 0:
        UpperCAmelCase_ = torch.randn(
            model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=snake_case_ ).to(model_output.device )
        UpperCAmelCase_ = (self._get_variance(snake_case_ , predicted_variance=snake_case_ ) ** 0.5) * noise

    UpperCAmelCase_ = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=snake_case_ , pred_original_sample=snake_case_ )


class __A ( UpperCamelCase__ ):
    def __init__(self : str , __a : UNetaDConditionModel , __a : Union[DDIMScheduler, DDPMScheduler] , __a : Optional[float] = 1.0 , ):
        super().__init__()
        UpperCAmelCase_ = bit_scale
        UpperCAmelCase_ = (
            ddim_bit_scheduler_step if isinstance(__a , __a ) else ddpm_bit_scheduler_step
        )

        self.register_modules(unet=__a , scheduler=__a )

    @torch.no_grad()
    def __call__(self : Union[str, Any] , __a : Optional[int] = 256 , __a : Optional[int] = 256 , __a : Optional[int] = 50 , __a : Optional[torch.Generator] = None , __a : Optional[int] = 1 , __a : Optional[str] = "pil" , __a : bool = True , **__a : Tuple , ):
        UpperCAmelCase_ = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width) , generator=__a , )
        UpperCAmelCase_ = decimal_to_bits(__a ) * self.bit_scale
        UpperCAmelCase_ = latents.to(self.device )

        self.scheduler.set_timesteps(__a )

        for t in self.progress_bar(self.scheduler.timesteps ):
            # predict the noise residual
            UpperCAmelCase_ = self.unet(__a , __a ).sample

            # compute the previous noisy sample x_t -> x_t-1
            UpperCAmelCase_ = self.scheduler.step(__a , __a , __a ).prev_sample

        UpperCAmelCase_ = bits_to_decimal(__a )

        if output_type == "pil":
            UpperCAmelCase_ = self.numpy_to_pil(__a )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=__a )
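The two helpers at the top of this file convert between [0, 1] images and ±1 bit planes; a round-trip sketch assuming the helpers are bound to their upstream names decimal_to_bits / bits_to_decimal:

# Round-trip check for the bit helpers above (upstream names assumed).
# 8-bit quantization bounds the reconstruction error by one gray level.
import torch

x = torch.rand(1, 3, 4, 4)       # image in [0, 1]
bits = decimal_to_bits(x)        # shape (1, 3 * 8, 4, 4), entries in {-1.0, 1.0}
recon = bits_to_decimal(bits)    # back to [0, 1], on a 1/255 grid
assert torch.allclose(x, recon, atol=1 / 255)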
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_torch_available,
    is_vision_available,
)


SCREAMING_SNAKE_CASE_: Optional[int] ={'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE_: Optional[int] =['BeitFeatureExtractor']
    SCREAMING_SNAKE_CASE_: int =['BeitImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE_: Optional[int] =[
        'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BeitForImageClassification',
        'BeitForMaskedImageModeling',
        'BeitForSemanticSegmentation',
        'BeitModel',
        'BeitPreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE_: int =[
        'FlaxBeitForImageClassification',
        'FlaxBeitForMaskedImageModeling',
        'FlaxBeitModel',
        'FlaxBeitPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    SCREAMING_SNAKE_CASE_: Dict =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
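A short sketch of what the lazy module buys you: the symbols listed in the import structure resolve on first attribute access, so importing the package stays cheap until a torch-backed class is actually touched.

# Illustrative only: attribute access on the lazy module triggers the real import.
from transformers.models.beit import BeitConfig  # resolved lazily via _LazyModule

config = BeitConfig()
print(config.hidden_size)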
'''simple docstring'''
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging


SCREAMING_SNAKE_CASE_: Optional[Any] =logging.get_logger(__name__)

SCREAMING_SNAKE_CASE_: Dict ={
    't5-small': 'https://huggingface.co/t5-small/resolve/main/config.json',
    't5-base': 'https://huggingface.co/t5-base/resolve/main/config.json',
    't5-large': 'https://huggingface.co/t5-large/resolve/main/config.json',
    't5-3b': 'https://huggingface.co/t5-3b/resolve/main/config.json',
    't5-11b': 'https://huggingface.co/t5-11b/resolve/main/config.json',
}


class __A ( UpperCamelCase__ ):
    a__ : List[Any] = """t5"""
    a__ : List[str] = ["""past_key_values"""]
    a__ : List[str] = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}

    def __init__(self : Tuple , __a : str=32128 , __a : Union[str, Any]=512 , __a : Union[str, Any]=64 , __a : List[str]=2048 , __a : List[str]=6 , __a : List[Any]=None , __a : str=8 , __a : Optional[int]=32 , __a : Union[str, Any]=128 , __a : Tuple=0.1 , __a : Union[str, Any]=1E-6 , __a : Optional[Any]=1.0 , __a : str="relu" , __a : List[Any]=True , __a : Tuple=True , __a : Optional[int]=0 , __a : Optional[int]=1 , **__a : str , ):
        UpperCAmelCase_ = vocab_size
        UpperCAmelCase_ = d_model
        UpperCAmelCase_ = d_kv
        UpperCAmelCase_ = d_ff
        UpperCAmelCase_ = num_layers
        UpperCAmelCase_ = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        UpperCAmelCase_ = num_heads
        UpperCAmelCase_ = relative_attention_num_buckets
        UpperCAmelCase_ = relative_attention_max_distance
        UpperCAmelCase_ = dropout_rate
        UpperCAmelCase_ = layer_norm_epsilon
        UpperCAmelCase_ = initializer_factor
        UpperCAmelCase_ = feed_forward_proj
        UpperCAmelCase_ = use_cache

        UpperCAmelCase_ = self.feed_forward_proj.split("-" )
        UpperCAmelCase_ = act_info[-1]
        UpperCAmelCase_ = act_info[0] == "gated"

        if len(__a ) > 1 and act_info[0] != "gated" or len(__a ) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            UpperCAmelCase_ = "gelu_new"

        super().__init__(
            pad_token_id=__a , eos_token_id=__a , is_encoder_decoder=__a , **__a , )


class __A ( UpperCamelCase__ ):
    @property
    def _lowercase (self : Any ):
        UpperCAmelCase_ = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            UpperCAmelCase_ = "past_encoder_sequence + sequence"
            UpperCAmelCase_ = {0: "batch"}
            UpperCAmelCase_ = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            UpperCAmelCase_ = {0: "batch", 1: "decoder_sequence"}
            UpperCAmelCase_ = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(__a , direction="inputs" )

        return common_inputs

    @property
    def _lowercase (self : int ):
        return 13
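A minimal usage sketch, written against the upstream class name T5Config: the constructor logic above parses a gated activation string and remaps "gated-gelu" to "gelu_new" for backward compatibility.

# Sketch using the upstream name for the first class above.
from transformers import T5Config

config = T5Config(feed_forward_proj="gated-gelu")
assert config.is_gated_act and config.dense_act_fn == "gelu_new"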
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil

import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset

from transformers import (
    BertConfig,
    BertForMaskedLM,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForMaskedLM,
    DistilBertTokenizer,
    GPTaConfig,
    GPTaLMHeadModel,
    GPTaTokenizer,
    RobertaConfig,
    RobertaForMaskedLM,
    RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed


SCREAMING_SNAKE_CASE_: Any ={
    'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
    'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}


def lowerCAmelCase_ ( snake_case_ : Any ) -> str:
    '''simple docstring'''
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts )
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config )
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights )

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0


def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] ) -> Optional[int]:
    '''simple docstring'''
    if args.student_type == "roberta":
        UpperCAmelCase_ = False
    elif args.student_type == "gpt2":
        UpperCAmelCase_ = False


def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : List[Any] ) -> Tuple:
    '''simple docstring'''
    if args.student_type == "roberta":
        UpperCAmelCase_ = False


def lowerCAmelCase_ ( ) -> Optional[Any]:
    '''simple docstring'''
    UpperCAmelCase_ = argparse.ArgumentParser(description="Training" )
    parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." )

    parser.add_argument(
        "--dump_path" , type=snake_case_ , required=snake_case_ , help="The output directory (log, checkpoints, parameters, etc.)" )
    parser.add_argument(
        "--data_file" , type=snake_case_ , required=snake_case_ , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , )

    parser.add_argument(
        "--student_type" , type=snake_case_ , choices=["distilbert", "roberta", "gpt2"] , required=snake_case_ , help="The student type (DistilBERT, RoBERTa)." , )
    parser.add_argument("--student_config" , type=snake_case_ , required=snake_case_ , help="Path to the student configuration." )
    parser.add_argument(
        "--student_pretrained_weights" , default=snake_case_ , type=snake_case_ , help="Load student initialization checkpoint." )

    parser.add_argument(
        "--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=snake_case_ , help="Teacher type (BERT, RoBERTa)." )
    parser.add_argument("--teacher_name" , type=snake_case_ , required=snake_case_ , help="The teacher model." )

    parser.add_argument("--temperature" , default=2.0 , type=snake_case_ , help="Temperature for the softmax temperature." )
    parser.add_argument(
        "--alpha_ce" , default=0.5 , type=snake_case_ , help="Linear weight for the distillation loss. Must be >=0." )
    parser.add_argument(
        "--alpha_mlm" , default=0.0 , type=snake_case_ , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , )
    parser.add_argument("--alpha_clm" , default=0.5 , type=snake_case_ , help="Linear weight for the CLM loss. Must be >=0." )
    parser.add_argument("--alpha_mse" , default=0.0 , type=snake_case_ , help="Linear weight of the MSE loss. Must be >=0." )
    parser.add_argument(
        "--alpha_cos" , default=0.0 , type=snake_case_ , help="Linear weight of the cosine embedding loss. Must be >=0." )

    parser.add_argument(
        "--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." )
    parser.add_argument(
        "--mlm_mask_prop" , default=0.15 , type=snake_case_ , help="Proportion of tokens for which we need to make a prediction." , )
    parser.add_argument("--word_mask" , default=0.8 , type=snake_case_ , help="Proportion of tokens to mask out." )
    parser.add_argument("--word_keep" , default=0.1 , type=snake_case_ , help="Proportion of tokens to keep." )
    parser.add_argument("--word_rand" , default=0.1 , type=snake_case_ , help="Proportion of tokens to randomly replace." )
    parser.add_argument(
        "--mlm_smoothing" , default=0.7 , type=snake_case_ , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , )
    parser.add_argument("--token_counts" , type=snake_case_ , help="The token counts in the data_file for MLM." )

    parser.add_argument(
        "--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , )
    parser.add_argument(
        "--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , )
    parser.add_argument(
        "--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , )

    parser.add_argument("--n_epoch" , type=snake_case_ , default=3 , help="Number of pass on the whole dataset." )
    parser.add_argument("--batch_size" , type=snake_case_ , default=5 , help="Batch size (for each process)." )
    parser.add_argument(
        "--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , )

    parser.add_argument(
        "--gradient_accumulation_steps" , type=snake_case_ , default=50 , help="Gradient accumulation for larger training batches." , )
    parser.add_argument("--warmup_prop" , default=0.05 , type=snake_case_ , help="Linear warmup proportion." )
    parser.add_argument("--weight_decay" , default=0.0 , type=snake_case_ , help="Weight decay if we apply some." )
    parser.add_argument("--learning_rate" , default=5E-4 , type=snake_case_ , help="The initial learning rate for Adam." )
    parser.add_argument("--adam_epsilon" , default=1E-6 , type=snake_case_ , help="Epsilon for Adam optimizer." )
    parser.add_argument("--max_grad_norm" , default=5.0 , type=snake_case_ , help="Max gradient norm." )
    parser.add_argument("--initializer_range" , default=0.02 , type=snake_case_ , help="Random initialization range." )

    parser.add_argument(
        "--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
    parser.add_argument(
        "--fp16_opt_level" , type=snake_case_ , default="O1" , help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ) , )
    parser.add_argument("--n_gpu" , type=snake_case_ , default=1 , help="Number of GPUs in the node." )
    parser.add_argument("--local_rank" , type=snake_case_ , default=-1 , help="Distributed training - Local rank" )
    parser.add_argument("--seed" , type=snake_case_ , default=56 , help="Random seed" )

    parser.add_argument("--log_interval" , type=snake_case_ , default=5_00 , help="Tensorboard logging interval." )
    parser.add_argument("--checkpoint_interval" , type=snake_case_ , default=40_00 , help="Checkpoint interval." )
    UpperCAmelCase_ = parser.parse_args()
    sanity_checks(snake_case_ )

    # ARGS #
    init_gpu_params(snake_case_ )
    set_seed(snake_case_ )
    if args.is_master:
        if os.path.exists(args.dump_path ):
            if not args.force:
                raise ValueError(
                    f"""Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite"""
                    " it. Use `--force` if you want to overwrite it" )
            else:
                shutil.rmtree(args.dump_path )

        if not os.path.exists(args.dump_path ):
            os.makedirs(args.dump_path )
        logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" )

        # SAVE PARAMS #
        logger.info(f"""Param: {args}""" )
        with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f:
            json.dump(vars(snake_case_ ) , snake_case_ , indent=4 )
        git_log(args.dump_path )

    UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = MODEL_CLASSES[args.student_type]
    UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    UpperCAmelCase_ = teacher_tokenizer_class.from_pretrained(args.teacher_name )
    UpperCAmelCase_ = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        UpperCAmelCase_ = tokenizer.all_special_tokens.index(snake_case_ )
        UpperCAmelCase_ = tokenizer.all_special_ids[idx]
    logger.info(f"""Special tokens {special_tok_ids}""" )
    UpperCAmelCase_ = special_tok_ids
    UpperCAmelCase_ = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"""Loading data from {args.data_file}""" )
    with open(args.data_file , "rb" ) as fp:
        UpperCAmelCase_ = pickle.load(snake_case_ )

    if args.mlm:
        logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" )
        with open(args.token_counts , "rb" ) as fp:
            UpperCAmelCase_ = pickle.load(snake_case_ )

        UpperCAmelCase_ = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            UpperCAmelCase_ = 0.0  # do not predict special tokens
        UpperCAmelCase_ = torch.from_numpy(snake_case_ )
    else:
        UpperCAmelCase_ = None

    UpperCAmelCase_ = LmSeqsDataset(params=snake_case_ , data=snake_case_ )
    logger.info("Data loader created." )

    # STUDENT #
    logger.info(f"""Loading student config from {args.student_config}""" )
    UpperCAmelCase_ = student_config_class.from_pretrained(args.student_config )
    UpperCAmelCase_ = True

    if args.student_pretrained_weights is not None:
        logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" )
        UpperCAmelCase_ = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case_ )
    else:
        UpperCAmelCase_ = student_model_class(snake_case_ )

    if args.n_gpu > 0:
        student.to(f"""cuda:{args.local_rank}""" )
    logger.info("Student loaded." )

    # TEACHER #
    UpperCAmelCase_ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case_ )
    if args.n_gpu > 0:
        teacher.to(f"""cuda:{args.local_rank}""" )
    logger.info(f"""Teacher loaded from {args.teacher_name}.""" )

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(snake_case_ , snake_case_ )
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(snake_case_ , snake_case_ )

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0 ) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    UpperCAmelCase_ = Distiller(
        params=snake_case_ , dataset=snake_case_ , token_probs=snake_case_ , student=snake_case_ , teacher=snake_case_ )
    distiller.train()
    logger.info("Let's go get some drinks." )


if __name__ == "__main__":
    main()
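For context, an illustrative invocation of this training entry point that satisfies the sanity checks above (MLM mode requires alpha_mlm > 0, alpha_clm == 0 and a token-counts file); all paths and weights are placeholders.

# Hypothetical CLI invocation; every flag below exists in the parser above.
import subprocess

subprocess.run([
    "python", "train.py",
    "--student_type", "distilbert",
    "--student_config", "training_configs/distilbert-base-uncased.json",
    "--teacher_type", "bert",
    "--teacher_name", "bert-base-uncased",
    "--alpha_ce", "5.0", "--alpha_mlm", "2.0", "--alpha_cos", "1.0", "--alpha_clm", "0.0", "--mlm",
    "--dump_path", "serialization_dir/my_first_distillation",
    "--data_file", "data/binarized_text.bert-base-uncased.pickle",
    "--token_counts", "data/token_counts.bert-base-uncased.pickle",
    "--force",
], check=True)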
'''simple docstring'''
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class __A ( UpperCamelCase__ ):
    a__ : Optional[int] = ["""image_processor""", """tokenizer"""]
    a__ : str = """OwlViTImageProcessor"""
    a__ : int = ("""CLIPTokenizer""", """CLIPTokenizerFast""")

    def __init__(self : Optional[int] , __a : List[Any]=None , __a : str=None , **__a : List[Any] ):
        UpperCAmelCase_ = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , __a , )
            UpperCAmelCase_ = kwargs.pop("feature_extractor" )

        UpperCAmelCase_ = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )

        super().__init__(__a , __a )

    def __call__(self : int , __a : List[str]=None , __a : Union[str, Any]=None , __a : int=None , __a : Dict="max_length" , __a : List[Any]="np" , **__a : Tuple ):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none." )

        if text is not None:
            if isinstance(__a , __a ) or (isinstance(__a , __a ) and not isinstance(text[0] , __a )):
                UpperCAmelCase_ = [self.tokenizer(__a , padding=__a , return_tensors=__a , **__a )]

            elif isinstance(__a , __a ) and isinstance(text[0] , __a ):
                UpperCAmelCase_ = []

                # Maximum number of queries across batch
                UpperCAmelCase_ = max([len(__a ) for t in text] )

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(__a ) != max_num_queries:
                        UpperCAmelCase_ = t + [" "] * (max_num_queries - len(__a ))

                    UpperCAmelCase_ = self.tokenizer(__a , padding=__a , return_tensors=__a , **__a )
                    encodings.append(__a )
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )

            if return_tensors == "np":
                UpperCAmelCase_ = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                UpperCAmelCase_ = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                UpperCAmelCase_ = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                UpperCAmelCase_ = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )

            elif return_tensors == "pt" and is_torch_available():
                import torch

                UpperCAmelCase_ = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
                UpperCAmelCase_ = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                UpperCAmelCase_ = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
                UpperCAmelCase_ = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )

            else:
                raise ValueError("Target return tensor type could not be returned" )

            UpperCAmelCase_ = BatchEncoding()
            UpperCAmelCase_ = input_ids
            UpperCAmelCase_ = attention_mask

        if query_images is not None:
            UpperCAmelCase_ = BatchEncoding()
            UpperCAmelCase_ = self.image_processor(
                __a , return_tensors=__a , **__a ).pixel_values
            UpperCAmelCase_ = query_pixel_values

        if images is not None:
            UpperCAmelCase_ = self.image_processor(__a , return_tensors=__a , **__a )

        if text is not None and images is not None:
            UpperCAmelCase_ = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            UpperCAmelCase_ = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**__a ) , tensor_type=__a )

    def _lowercase (self : List[str] , *__a : Optional[Any] , **__a : Dict ):
        return self.image_processor.post_process(*__a , **__a )

    def _lowercase (self : List[Any] , *__a : Optional[int] , **__a : Optional[int] ):
        return self.image_processor.post_process_object_detection(*__a , **__a )

    def _lowercase (self : Optional[int] , *__a : int , **__a : Union[str, Any] ):
        return self.image_processor.post_process_image_guided_detection(*__a , **__a )

    def _lowercase (self : List[str] , *__a : List[str] , **__a : List[str] ):
        return self.tokenizer.batch_decode(*__a , **__a )

    def _lowercase (self : int , *__a : List[Any] , **__a : Tuple ):
        return self.tokenizer.decode(*__a , **__a )

    @property
    def _lowercase (self : Optional[Any] ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __a , )
        return self.image_processor_class

    @property
    def _lowercase (self : Union[str, Any] ):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __a , )
        return self.image_processor
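An end-to-end usage sketch against the upstream class name OwlViTProcessor: nested text queries are padded to the maximum number of queries per sample, as implemented above. The image URL is the standard COCO test image used elsewhere in these files.

# Usage sketch (upstream name OwlViTProcessor; downloads a checkpoint and an image).
import requests
from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
print(inputs["input_ids"].shape, inputs["pixel_values"].shape)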
'''simple docstring'''
import gc
import unittest

import torch
from parameterized import parameterized

from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


enable_full_determinism()


class __A ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
    a__ : int = AutoencoderKL
    a__ : Optional[Any] = """sample"""
    a__ : Union[str, Any] = 1e-2

    @property
    def _lowercase (self : Optional[int] ):
        UpperCAmelCase_ = 4
        UpperCAmelCase_ = 3
        UpperCAmelCase_ = (32, 32)

        UpperCAmelCase_ = floats_tensor((batch_size, num_channels) + sizes ).to(__a )
        return {"sample": image}

    @property
    def _lowercase (self : Any ):
        return (3, 32, 32)

    @property
    def _lowercase (self : Dict ):
        return (3, 32, 32)

    def _lowercase (self : int ):
        UpperCAmelCase_ = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        UpperCAmelCase_ = self.dummy_input
        return init_dict, inputs_dict

    def _lowercase (self : int ):
        pass

    def _lowercase (self : int ):
        pass

    @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
    def _lowercase (self : List[Any] ):
        # enable deterministic behavior for gradient checkpointing
        UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_init_args_and_inputs_for_common()
        UpperCAmelCase_ = self.model_class(**__a )
        model.to(__a )

        assert not model.is_gradient_checkpointing and model.training

        UpperCAmelCase_ = model(**__a ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        UpperCAmelCase_ = torch.randn_like(__a )
        UpperCAmelCase_ = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        UpperCAmelCase_ = self.model_class(**__a )
        # clone model
        model_a.load_state_dict(model.state_dict() )
        model_a.to(__a )
        model_a.enable_gradient_checkpointing()

        assert model_a.is_gradient_checkpointing and model_a.training

        UpperCAmelCase_ = model_a(**__a ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        UpperCAmelCase_ = (out_a - labels).mean()
        loss_a.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1E-5 )
        UpperCAmelCase_ = dict(model.named_parameters() )
        UpperCAmelCase_ = dict(model_a.named_parameters() )
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )

    def _lowercase (self : Any ):
        UpperCAmelCase_ , UpperCAmelCase_ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=__a )
        self.assertIsNotNone(__a )
        self.assertEqual(len(loading_info["missing_keys"] ) , 0 )

        model.to(__a )
        UpperCAmelCase_ = model(**self.dummy_input )

        assert image is not None, "Make sure output is not None"

    def _lowercase (self : List[str] ):
        UpperCAmelCase_ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
        UpperCAmelCase_ = model.to(__a )
        model.eval()

        if torch_device == "mps":
            UpperCAmelCase_ = torch.manual_seed(0 )
        else:
            UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 )

        UpperCAmelCase_ = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        UpperCAmelCase_ = image.to(__a )
        with torch.no_grad():
            UpperCAmelCase_ = model(__a , sample_posterior=__a , generator=__a ).sample

        UpperCAmelCase_ = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            UpperCAmelCase_ = torch.tensor(
                [
                    -4.0078E-01,
                    -3.8323E-04,
                    -1.2681E-01,
                    -1.1462E-01,
                    2.0095E-01,
                    1.0893E-01,
                    -8.8247E-02,
                    -3.0361E-01,
                    -9.8644E-03,
                ] )
        elif torch_device == "cpu":
            UpperCAmelCase_ = torch.tensor(
                [-0.13_52, 0.08_78, 0.04_19, -0.08_18, -0.10_69, 0.06_88, -0.14_58, -0.44_46, -0.00_26] )
        else:
            UpperCAmelCase_ = torch.tensor(
                [-0.24_21, 0.46_42, 0.25_07, -0.04_38, 0.06_82, 0.31_60, -0.20_18, -0.07_27, 0.24_85] )

        self.assertTrue(torch_all_close(__a , __a , rtol=1E-2 ) )


@slow
class __A ( unittest.TestCase ):
    def _lowercase (self : Dict , __a : Dict , __a : int ):
        return f"""gaussian_noise_s={seed}_shape={"_".join([str(__a ) for s in shape] )}.npy"""

    def _lowercase (self : str ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _lowercase (self : Optional[Any] , __a : Optional[Any]=0 , __a : str=(4, 3, 512, 512) , __a : List[str]=False ):
        UpperCAmelCase_ = torch.floataa if fpaa else torch.floataa
        UpperCAmelCase_ = torch.from_numpy(load_hf_numpy(self.get_file_format(__a , __a ) ) ).to(__a ).to(__a )
        return image

    def _lowercase (self : List[Any] , __a : Union[str, Any]="CompVis/stable-diffusion-v1-4" , __a : List[Any]=False ):
        UpperCAmelCase_ = "fp16" if fpaa else None
        UpperCAmelCase_ = torch.floataa if fpaa else torch.floataa
        UpperCAmelCase_ = AutoencoderKL.from_pretrained(
            __a , subfolder="vae" , torch_dtype=__a , revision=__a , )
        model.to(__a ).eval()
        return model

    def _lowercase (self : List[Any] , __a : List[Any]=0 ):
        if torch_device == "mps":
            return torch.manual_seed(__a )
        return torch.Generator(device=__a ).manual_seed(__a )

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.16_03, 0.98_78, -0.04_95, -0.07_90, -0.27_09, 0.83_75, -0.20_60, -0.08_24], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
            [47, [-0.23_76, 0.11_68, 0.13_32, -0.48_40, -0.25_08, -0.07_91, -0.04_93, -0.40_89], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
            # fmt: on
        ] )
    def _lowercase (self : List[Any] , __a : Dict , __a : Optional[int] , __a : List[str] ):
        UpperCAmelCase_ = self.get_sd_vae_model()
        UpperCAmelCase_ = self.get_sd_image(__a )
        UpperCAmelCase_ = self.get_generator(__a )

        with torch.no_grad():
            UpperCAmelCase_ = model(__a , generator=__a , sample_posterior=__a ).sample

        assert sample.shape == image.shape

        UpperCAmelCase_ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        UpperCAmelCase_ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )

        assert torch_all_close(__a , __a , atol=3E-3 )

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.05_13, 0.02_89, 1.37_99, 0.21_66, -0.25_73, -0.08_71, 0.51_03, -0.09_99]],
            [47, [-0.41_28, -0.13_20, -0.37_04, 0.19_65, -0.41_16, -0.23_32, -0.33_40, 0.22_47]],
            # fmt: on
        ] )
    @require_torch_gpu
    def _lowercase (self : Dict , __a : Optional[int] , __a : int ):
        UpperCAmelCase_ = self.get_sd_vae_model(fpaa=__a )
        UpperCAmelCase_ = self.get_sd_image(__a , fpaa=__a )
        UpperCAmelCase_ = self.get_generator(__a )

        with torch.no_grad():
            UpperCAmelCase_ = model(__a , generator=__a , sample_posterior=__a ).sample

        assert sample.shape == image.shape

        UpperCAmelCase_ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        UpperCAmelCase_ = torch.tensor(__a )

        assert torch_all_close(__a , __a , atol=1E-2 )

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.16_09, 0.98_66, -0.04_87, -0.07_77, -0.27_16, 0.83_68, -0.20_55, -0.08_14], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
            [47, [-0.23_77, 0.11_47, 0.13_33, -0.48_41, -0.25_06, -0.08_05, -0.04_91, -0.40_85], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
            # fmt: on
        ] )
    def _lowercase (self : str , __a : int , __a : Union[str, Any] , __a : List[Any] ):
        UpperCAmelCase_ = self.get_sd_vae_model()
        UpperCAmelCase_ = self.get_sd_image(__a )

        with torch.no_grad():
            UpperCAmelCase_ = model(__a ).sample

        assert sample.shape == image.shape

        UpperCAmelCase_ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        UpperCAmelCase_ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )

        assert torch_all_close(__a , __a , atol=3E-3 )

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.20_51, -0.18_03, -0.23_11, -0.21_14, -0.32_92, -0.35_74, -0.29_53, -0.33_23]],
            [37, [-0.26_32, -0.26_25, -0.21_99, -0.27_41, -0.45_39, -0.49_90, -0.37_20, -0.49_25]],
            # fmt: on
        ] )
    @require_torch_gpu
    def _lowercase (self : int , __a : int , __a : int ):
        UpperCAmelCase_ = self.get_sd_vae_model()
        UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) )

        with torch.no_grad():
            UpperCAmelCase_ = model.decode(__a ).sample

        assert list(sample.shape ) == [3, 3, 512, 512]

        UpperCAmelCase_ = sample[-1, -2:, :2, -2:].flatten().cpu()
        UpperCAmelCase_ = torch.tensor(__a )

        assert torch_all_close(__a , __a , atol=1E-3 )

    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.03_69, 0.02_07, -0.07_76, -0.06_82, -0.17_47, -0.19_30, -0.14_65, -0.20_39]],
            [16, [-0.16_28, -0.21_34, -0.27_47, -0.26_42, -0.37_74, -0.44_04, -0.36_87, -0.42_77]],
            # fmt: on
        ] )
    @require_torch_gpu
    def _lowercase (self : Union[str, Any] , __a : List[str] , __a : Optional[Any] ):
        UpperCAmelCase_ = self.get_sd_vae_model(fpaa=__a )
        UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) , fpaa=__a )

        with torch.no_grad():
            UpperCAmelCase_ = model.decode(__a ).sample

        assert list(sample.shape ) == [3, 3, 512, 512]

        UpperCAmelCase_ = sample[-1, -2:, :2, 
-2:].flatten().float().cpu() UpperCAmelCase_ = torch.tensor(__a ) assert torch_all_close(__a , __a , atol=5E-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def _lowercase (self : List[str] , __a : int ): UpperCAmelCase_ = self.get_sd_vae_model(fpaa=__a ) UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) , fpaa=__a ) with torch.no_grad(): UpperCAmelCase_ = model.decode(__a ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): UpperCAmelCase_ = model.decode(__a ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(__a , __a , atol=1E-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def _lowercase (self : Union[str, Any] , __a : Dict ): UpperCAmelCase_ = self.get_sd_vae_model() UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) ) with torch.no_grad(): UpperCAmelCase_ = model.decode(__a ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): UpperCAmelCase_ = model.decode(__a ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(__a , __a , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.30_01, 0.09_18, -2.69_84, -3.97_20, -3.20_99, -5.03_53, 1.73_38, -0.20_65, 3.42_67]], [47, [-1.50_30, -4.38_71, -6.03_55, -9.11_57, -1.66_61, -2.78_53, 2.16_07, -5.08_23, 2.56_33]], # fmt: on ] ) def _lowercase (self : Tuple , __a : List[Any] , __a : List[Any] ): UpperCAmelCase_ = self.get_sd_vae_model() UpperCAmelCase_ = self.get_sd_image(__a ) UpperCAmelCase_ = self.get_generator(__a ) with torch.no_grad(): UpperCAmelCase_ = model.encode(__a ).latent_dist UpperCAmelCase_ = dist.sample(generator=__a ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] UpperCAmelCase_ = sample[0, -1, -3:, -3:].flatten().cpu() UpperCAmelCase_ = torch.tensor(__a ) UpperCAmelCase_ = 3E-3 if torch_device != "mps" else 1E-2 assert torch_all_close(__a , __a , atol=__a )
78
1
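The slow VAE tests in the style_context above repeatedly exercise AutoencoderKL's encode/decode round trip and assert on latent shapes. A minimal sketch of that round trip outside the test harness, using the same checkpoint and the same calls the tests make; the 512x512 input size and the seed are illustrative choices, not requirements:

# Sketch of the encode/decode round trip the tests above assert on. The checkpoint,
# `encode(...).latent_dist`, `sample(generator=...)` and `decode(...).sample` all
# mirror the slow tests; input size and seed are arbitrary.
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")
vae.eval()

image = torch.randn(1, 3, 512, 512)
with torch.no_grad():
    dist = vae.encode(image).latent_dist                    # diagonal Gaussian posterior
    latents = dist.sample(generator=torch.manual_seed(0))
    recon = vae.decode(latents).sample

# Latents are 4-channel and 8x smaller spatially, matching the test's shape check.
assert latents.shape == (1, 4, 64, 64)
assert recon.shape == image.shape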
'''simple docstring'''
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import GLPNImageProcessor


class __A ( unittest.TestCase ):
    def __init__(self : List[str] , __a : Any , __a : str=7 , __a : Optional[int]=3 , __a : Tuple=18 , __a : Union[str, Any]=30 , __a : Dict=400 , __a : Tuple=True , __a : List[str]=32 , __a : Optional[int]=True , ):
        UpperCAmelCase_ = parent
        UpperCAmelCase_ = batch_size
        UpperCAmelCase_ = num_channels
        UpperCAmelCase_ = image_size
        UpperCAmelCase_ = min_resolution
        UpperCAmelCase_ = max_resolution
        UpperCAmelCase_ = do_resize
        UpperCAmelCase_ = size_divisor
        UpperCAmelCase_ = do_rescale

    def _lowercase (self : int ):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class __A ( UpperCamelCase__ , unittest.TestCase ):
    a__ : str = GLPNImageProcessor if is_vision_available() else None

    def _lowercase (self : Tuple ):
        UpperCAmelCase_ = GLPNImageProcessingTester(self )

    @property
    def _lowercase (self : Optional[int] ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def _lowercase (self : Union[str, Any] ):
        UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__a , "do_resize" ) )
        self.assertTrue(hasattr(__a , "size_divisor" ) )
        self.assertTrue(hasattr(__a , "resample" ) )
        self.assertTrue(hasattr(__a , "do_rescale" ) )

    def _lowercase (self : Optional[Any] ):
        pass

    def _lowercase (self : Optional[int] ):
        # Initialize image_processing
        UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a )
        for image in image_inputs:
            self.assertIsInstance(__a , Image.Image )

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )

    def _lowercase (self : Optional[int] ):
        # Initialize image_processing
        UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a )
        for image in image_inputs:
            self.assertIsInstance(__a , np.ndarray )

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )

    def _lowercase (self : Tuple ):
        # Initialize image_processing
        UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a )
        for image in image_inputs:
            self.assertIsInstance(__a , torch.Tensor )

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
78
'''simple docstring'''
import logging

from transformers import PretrainedConfig


SCREAMING_SNAKE_CASE_: Any =logging.getLogger(__name__)

SCREAMING_SNAKE_CASE_: Any ={
    'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}


class __A ( UpperCamelCase__ ):
    a__ : List[Any] = """bertabs"""

    def __init__(self : Any , __a : int=30522 , __a : Tuple=512 , __a : Tuple=6 , __a : Dict=512 , __a : int=8 , __a : List[Any]=512 , __a : List[str]=0.2 , __a : List[Any]=6 , __a : int=768 , __a : Any=8 , __a : Dict=2048 , __a : Tuple=0.2 , **__a : Optional[int] , ):
        super().__init__(**__a )

        UpperCAmelCase_ = vocab_size
        UpperCAmelCase_ = max_pos
        UpperCAmelCase_ = enc_layers
        UpperCAmelCase_ = enc_hidden_size
        UpperCAmelCase_ = enc_heads
        UpperCAmelCase_ = enc_ff_size
        UpperCAmelCase_ = enc_dropout
        UpperCAmelCase_ = dec_layers
        UpperCAmelCase_ = dec_hidden_size
        UpperCAmelCase_ = dec_heads
        UpperCAmelCase_ = dec_ff_size
        UpperCAmelCase_ = dec_dropout
78
1
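The GLPN image-processor tests above assert a single invariant: both spatial dimensions of the processed output are multiples of size_divisor. A standalone sketch of that invariant; the floor-to-multiple rule is inferred from the assertions, not copied from the processor's internals:

# The size_divisor invariant the GLPN tests check, in isolation. Rounding each
# dimension down to a multiple of the divisor is an assumption inferred from the
# assertions; the real processor resizes with interpolation rather than cropping.
import numpy as np

def floor_to_multiple(image: np.ndarray, size_divisor: int = 32) -> np.ndarray:
    h, w = image.shape[:2]
    return image[: (h // size_divisor) * size_divisor, : (w // size_divisor) * size_divisor]

out = floor_to_multiple(np.zeros((217, 350, 3), dtype=np.uint8))
assert out.shape[0] % 32 == 0 and out.shape[1] % 32 == 0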
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging SCREAMING_SNAKE_CASE_: int =logging.get_logger(__name__) class __A ( UpperCamelCase__ ): a__ : Union[str, Any] = ["""pixel_values"""] def __init__(self : List[str] , __a : bool = True , __a : Optional[Dict[str, int]] = None , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : bool = True , __a : Dict[str, int] = None , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , **__a : List[str] , ): super().__init__(**__a ) UpperCAmelCase_ = size if size is not None else {"shortest_edge": 256} UpperCAmelCase_ = get_size_dict(__a , default_to_square=__a ) UpperCAmelCase_ = crop_size if crop_size is not None else {"height": 224, "width": 224} UpperCAmelCase_ = get_size_dict(__a ) UpperCAmelCase_ = do_resize UpperCAmelCase_ = size UpperCAmelCase_ = resample UpperCAmelCase_ = do_center_crop UpperCAmelCase_ = crop_size UpperCAmelCase_ = do_rescale UpperCAmelCase_ = rescale_factor UpperCAmelCase_ = do_normalize UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowercase (self : Dict , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[Any] , ): UpperCAmelCase_ = get_size_dict(__a , default_to_square=__a ) if "shortest_edge" not in size: raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}""" ) UpperCAmelCase_ = get_resize_output_image_size(__a , size=size["shortest_edge"] , default_to_square=__a ) return resize(__a , size=__a , resample=__a , data_format=__a , **__a ) def _lowercase (self : Union[str, Any] , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[Any] , ): UpperCAmelCase_ = get_size_dict(__a ) return center_crop(__a , size=(size["height"], size["width"]) , data_format=__a , **__a ) def _lowercase (self : int , __a : np.ndarray , __a : float , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[str] ): return rescale(__a , scale=__a , data_format=__a , **__a ) def _lowercase (self : str , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[str] , ): return normalize(__a , mean=__a , std=__a , data_format=__a , **__a ) def _lowercase (self : str , __a : ImageInput , __a : Optional[bool] = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : Optional[bool] = None , __a : Optional[float] = None , __a : Optional[bool] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[str, TensorType]] = None , __a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__a : List[str] , ): UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize UpperCAmelCase_ = size if size is not None else self.size UpperCAmelCase_ = get_size_dict(__a , default_to_square=__a ) UpperCAmelCase_ = resample if resample is not None else self.resample UpperCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase_ = crop_size if crop_size is not None else self.crop_size UpperCAmelCase_ = get_size_dict(__a ) UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean UpperCAmelCase_ = image_std if image_std is not None else self.image_std UpperCAmelCase_ = make_list_of_images(__a ) if not valid_images(__a ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. 
UpperCAmelCase_ = [to_numpy_array(__a ) for image in images] if do_resize: UpperCAmelCase_ = [self.resize(image=__a , size=__a , resample=__a ) for image in images] if do_center_crop: UpperCAmelCase_ = [self.center_crop(image=__a , size=__a ) for image in images] if do_rescale: UpperCAmelCase_ = [self.rescale(image=__a , scale=__a ) for image in images] if do_normalize: UpperCAmelCase_ = [self.normalize(image=__a , mean=__a , std=__a ) for image in images] UpperCAmelCase_ = [to_channel_dimension_format(__a , __a ) for image in images] UpperCAmelCase_ = {"pixel_values": images} return BatchFeature(data=__a , tensor_type=__a )
78
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE_: Tuple =logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> int: '''simple docstring''' UpperCAmelCase_ = "huggingface/label-files" UpperCAmelCase_ = "imagenet-1k-id2label.json" UpperCAmelCase_ = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="dataset" ) , "r" ) ) UpperCAmelCase_ = {int(snake_case_ ): v for k, v in idalabel.items()} UpperCAmelCase_ = {v: k for k, v in idalabel.items()} UpperCAmelCase_ = "std_conv" if "bit" in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" UpperCAmelCase_ = BitConfig( conv_layer=snake_case_ , num_labels=10_00 , idalabel=snake_case_ , labelaid=snake_case_ , ) return config def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' if "stem.conv" in name: UpperCAmelCase_ = name.replace("stem.conv" , "bit.embedder.convolution" ) if "blocks" in name: UpperCAmelCase_ = name.replace("blocks" , "layers" ) if "head.fc" in name: UpperCAmelCase_ = name.replace("head.fc" , "classifier.1" ) if name.startswith("norm" ): UpperCAmelCase_ = "bit." + name if "bit" not in name and "classifier" not in name: UpperCAmelCase_ = "bit.encoder." 
+ name return name def lowerCAmelCase_ ( ) -> Dict: '''simple docstring''' UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : int=False ) -> List[Any]: '''simple docstring''' UpperCAmelCase_ = get_config(snake_case_ ) # load original model from timm UpperCAmelCase_ = create_model(snake_case_ , pretrained=snake_case_ ) timm_model.eval() # load state_dict of original model UpperCAmelCase_ = timm_model.state_dict() for key in state_dict.copy().keys(): UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val.squeeze() if "head" in key else val # load HuggingFace model UpperCAmelCase_ = BitForImageClassification(snake_case_ ) model.eval() model.load_state_dict(snake_case_ ) # create image processor UpperCAmelCase_ = create_transform(**resolve_data_config({} , model=snake_case_ ) ) UpperCAmelCase_ = transform.transforms UpperCAmelCase_ = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } UpperCAmelCase_ = BitImageProcessor( do_resize=snake_case_ , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=snake_case_ , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=snake_case_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) UpperCAmelCase_ = prepare_img() UpperCAmelCase_ = transform(snake_case_ ).unsqueeze(0 ) UpperCAmelCase_ = processor(snake_case_ , return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(snake_case_ , snake_case_ ) # verify logits with torch.no_grad(): UpperCAmelCase_ = model(snake_case_ ) UpperCAmelCase_ = outputs.logits print("Logits:" , logits[0, :3] ) print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] ) UpperCAmelCase_ = timm_model(snake_case_ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(snake_case_ , outputs.logits , atol=1E-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" ) model.save_pretrained(snake_case_ ) processor.save_pretrained(snake_case_ ) if push_to_hub: print(f"""Pushing model {model_name} and processor to the hub""" ) model.push_to_hub(f"""ybelkada/{model_name}""" ) processor.push_to_hub(f"""ybelkada/{model_name}""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_: int =argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='resnetv2_50x1_bitm', type=str, help='Name of the BiT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model to the hub.', ) SCREAMING_SNAKE_CASE_: Union[str, Any] =parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
78
1
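The image processor defined above chains four steps: resize to a target shortest edge, center crop, rescale, and normalize. A plain PIL/NumPy sketch of that chain using the class's own defaults (shortest edge 256, 224x224 crop, 1/255 rescale, the 0.5 IMAGENET_STANDARD mean/std); the helper name is illustrative, not part of any library:

# Plain PIL/NumPy sketch of the resize -> center-crop -> rescale -> normalize
# chain implemented by the processor above. Defaults mirror the class
# (IMAGENET_STANDARD statistics are 0.5 everywhere); the helper name is
# illustrative only.
import numpy as np
from PIL import Image

IMAGENET_STANDARD_MEAN = np.array([0.5, 0.5, 0.5], dtype=np.float32)
IMAGENET_STANDARD_STD = np.array([0.5, 0.5, 0.5], dtype=np.float32)

def preprocess(image: Image.Image, shortest_edge: int = 256, crop: int = 224) -> np.ndarray:
    # resize so the shorter side equals `shortest_edge`, preserving aspect ratio
    w, h = image.size
    scale = shortest_edge / min(w, h)
    image = image.resize((round(w * scale), round(h * scale)), Image.BILINEAR)
    # center crop to crop x crop
    w, h = image.size
    left, top = (w - crop) // 2, (h - crop) // 2
    image = image.crop((left, top, left + crop, top + crop))
    # rescale to [0, 1], normalize, move channels first
    arr = np.asarray(image, dtype=np.float32) / 255.0
    arr = (arr - IMAGENET_STANDARD_MEAN) / IMAGENET_STANDARD_STD
    return arr.transpose(2, 0, 1)

assert preprocess(Image.new("RGB", (640, 480))).shape == (3, 224, 224)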
'''simple docstring'''
from collections.abc import Sequence


def lowerCAmelCase_ ( snake_case_ : Sequence[int] | None = None ) -> int:
    '''simple docstring'''
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty" )

    UpperCAmelCase_ = nums[0]
    for i in range(1 , len(snake_case_ ) ):
        UpperCAmelCase_ = nums[i]
        UpperCAmelCase_ = max(snake_case_ , ans + num , snake_case_ )

    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    SCREAMING_SNAKE_CASE_: Any =int(input('Enter number of elements : ').strip())
    SCREAMING_SNAKE_CASE_: Union[str, Any] =list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
    print(max_subsequence_sum(array))
78
'''simple docstring''' import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class __A ( unittest.TestCase ): def _lowercase (self : List[str] ): UpperCAmelCase_ = 0 def _lowercase (self : Tuple ): UpperCAmelCase_ = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" ) self.assertIsInstance(__a , __a ) def _lowercase (self : str ): with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json" UpperCAmelCase_ = Path(__a ) / "config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , ) json.dump({"model_type": "clip"} , open(__a , "w" ) ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def _lowercase (self : Dict ): # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json" UpperCAmelCase_ = Path(__a ) / "config.json" json.dump( {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , ) json.dump({"model_type": "clip"} , open(__a , "w" ) ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def _lowercase (self : List[str] ): with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = CLIPConfig() # Create a dummy config file with image_proceesor_type UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json" UpperCAmelCase_ = Path(__a ) / "config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , ) json.dump({"model_type": "clip"} , open(__a , "w" ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ).to_dict() config_dict.pop("image_processor_type" ) UpperCAmelCase_ = CLIPImageProcessor(**__a ) # save in new folder model_config.save_pretrained(__a ) config.save_pretrained(__a ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ) # make sure private variable is not incorrectly saved UpperCAmelCase_ = json.loads(config.to_json_string() ) self.assertTrue("_processor_class" not in dict_as_saved ) self.assertIsInstance(__a , __a ) def _lowercase (self : int ): with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def _lowercase (self : Tuple ): with self.assertRaisesRegex( __a , "clip-base is not a local folder and is not a valid model identifier" ): UpperCAmelCase_ = AutoImageProcessor.from_pretrained("clip-base" ) def _lowercase (self : Optional[int] ): with self.assertRaisesRegex( __a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): UpperCAmelCase_ = 
AutoImageProcessor.from_pretrained(__a , revision="aaaaaa" ) def _lowercase (self : Union[str, Any] ): with self.assertRaisesRegex( __a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ): UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" ) def _lowercase (self : List[Any] ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__a ): UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" ) # If remote code is disabled, we can't load this config. with self.assertRaises(__a ): UpperCAmelCase_ = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) # Test image processor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__a ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a , trust_remote_code=__a ) self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" ) def _lowercase (self : Optional[int] ): try: AutoConfig.register("custom" , __a ) AutoImageProcessor.register(__a , __a ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__a ): AutoImageProcessor.register(__a , __a ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json" UpperCAmelCase_ = Path(__a ) / "config.json" json.dump( {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , ) json.dump({"model_type": "clip"} , open(__a , "w" ) ) UpperCAmelCase_ = CustomImageProcessor.from_pretrained(__a ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__a ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def _lowercase (self : Optional[int] ): class __A ( UpperCamelCase__ ): a__ : str = True try: AutoConfig.register("custom" , __a ) AutoImageProcessor.register(__a , __a ) # If remote code is not set, the default is to use local UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. 
UpperCAmelCase_ = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub UpperCAmelCase_ = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) self.assertTrue(not hasattr(__a , "is_local" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
78
1
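The max_subsequence_sum file above is Kadane's algorithm, obscured by the dataset's identifier rewriting: it tracks the best subarray sum ending at the current element and either extends that subarray or restarts it. A deobfuscated sketch with a worked example; the variable names are chosen for readability and map onto the original's rewritten identifiers:

# Deobfuscated sketch of the Kadane's-algorithm file above. `best_ending_here`
# is the maximum sum of a subarray ending at the current element; `ans` is the
# best sum seen anywhere.
from collections.abc import Sequence

def max_subsequence_sum(nums: Sequence[int]) -> int:
    if not nums:
        raise ValueError("Input sequence should not be empty")
    ans = best_ending_here = nums[0]
    for num in nums[1:]:
        best_ending_here = max(best_ending_here + num, num)  # extend or restart
        ans = max(ans, best_ending_here)
    return ans

# Worked example: the best subarray of [-2, 1, -3, 4, -1, 2, 1, -5, 4] is
# [4, -1, 2, 1], with sum 6.
assert max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6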
'''simple docstring'''
import random
import unittest

import torch

from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class __A ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
    a__ : int = IFImgaImgSuperResolutionPipeline
    a__ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""}
    a__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""} )
    a__ : Dict = PipelineTesterMixin.required_optional_params - {"""latents"""}

    def _lowercase (self : Tuple ):
        return self._get_superresolution_dummy_components()

    def _lowercase (self : Dict , __a : str , __a : Dict=0 ):
        if str(__a ).startswith("mps" ):
            UpperCAmelCase_ = torch.manual_seed(__a )
        else:
            UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(__a )

        UpperCAmelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
        UpperCAmelCase_ = floats_tensor((1, 3, 16, 16) , rng=random.Random(__a ) ).to(__a )

        UpperCAmelCase_ = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() ,
        reason="XFormers attention is only available with CUDA and `xformers` installed" ,
    )
    def _lowercase (self : Tuple ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )

    def _lowercase (self : Tuple ):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
    def _lowercase (self : Dict ):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1E-1 )

    def _lowercase (self : List[Any] ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )

    def _lowercase (self : Tuple ):
        self._test_save_load_local()

    def _lowercase (self : Dict ):
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 ,
        )
78
'''simple docstring''' import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_: Tuple =False, False, False @dataclass class __A : a__ : Optional[int] = None a__ : bool = True a__ : bool = True a__ : Optional[str] = None # Automatically constructed a__ : ClassVar[str] = "dict" a__ : ClassVar[Any] = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} ) a__ : str = field(default="""Audio""" , init=UpperCamelCase__ , repr=UpperCamelCase__ ) def __call__(self : Optional[Any] ): return self.pa_type def _lowercase (self : str , __a : Union[str, bytes, dict] ): try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err if isinstance(__a , __a ): return {"bytes": None, "path": value} elif isinstance(__a , __a ): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes UpperCAmelCase_ = BytesIO() sf.write(__a , value["array"] , value["sampling_rate"] , format="wav" ) return {"bytes": buffer.getvalue(), "path": None} elif value.get("path" ) is not None and os.path.isfile(value["path"] ): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith("pcm" ): # "PCM" only has raw audio bytes if value.get("sampling_rate" ) is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" ) if value.get("bytes" ): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) UpperCAmelCase_ = np.frombuffer(value["bytes"] , dtype=np.intaa ).astype(np.floataa ) / 32767 else: UpperCAmelCase_ = np.memmap(value["path"] , dtype="h" , mode="r" ).astype(np.floataa ) / 32767 UpperCAmelCase_ = BytesIO(bytes() ) sf.write(__a , __a , value["sampling_rate"] , format="wav" ) return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get("path" )} elif value.get("bytes" ) is not None or value.get("path" ) is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get("bytes" ), "path": value.get("path" )} else: raise ValueError( f"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" ) def _lowercase (self : Dict , __a : dict , __a : Optional[Dict[str, Union[str, bool, None]]] = None ): if not self.decode: raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." ) UpperCAmelCase_ , UpperCAmelCase_ = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None) if path is None and file is None: raise ValueError(f"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" ) try: import librosa import soundfile as sf except ImportError as err: raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." 
) from err UpperCAmelCase_ = xsplitext(__a )[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, " "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, " "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " ) if file is None: UpperCAmelCase_ = token_per_repo_id or {} UpperCAmelCase_ = path.split("::" )[-1] try: UpperCAmelCase_ = string_to_dict(__a , config.HUB_DATASETS_URL )["repo_id"] UpperCAmelCase_ = token_per_repo_id[repo_id] except (ValueError, KeyError): UpperCAmelCase_ = None with xopen(__a , "rb" , use_auth_token=__a ) as f: UpperCAmelCase_ , UpperCAmelCase_ = sf.read(__a ) else: UpperCAmelCase_ , UpperCAmelCase_ = sf.read(__a ) UpperCAmelCase_ = array.T if self.mono: UpperCAmelCase_ = librosa.to_mono(__a ) if self.sampling_rate and self.sampling_rate != sampling_rate: UpperCAmelCase_ = librosa.resample(__a , orig_sr=__a , target_sr=self.sampling_rate ) UpperCAmelCase_ = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def _lowercase (self : Dict ): from .features import Value if self.decode: raise ValueError("Cannot flatten a decoded Audio feature." ) return { "bytes": Value("binary" ), "path": Value("string" ), } def _lowercase (self : Optional[Any] , __a : Union[pa.StringArray, pa.StructArray] ): if pa.types.is_string(storage.type ): UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.binary() ) UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.string() ) UpperCAmelCase_ = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ): UpperCAmelCase_ = pa.array([Audio().encode_example(__a ) if x is not None else None for x in storage.to_pylist()] ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("bytes" ) >= 0: UpperCAmelCase_ = storage.field("bytes" ) else: UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.binary() ) if storage.type.get_field_index("path" ) >= 0: UpperCAmelCase_ = storage.field("path" ) else: UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.string() ) UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() ) return array_cast(__a , self.pa_type ) def _lowercase (self : Dict , __a : pa.StructArray ): @no_op_if_value_is_null def path_to_bytes(__a : Tuple ): with xopen(__a , "rb" ) as f: UpperCAmelCase_ = f.read() return bytes_ UpperCAmelCase_ = pa.array( [ (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) UpperCAmelCase_ = pa.array( [os.path.basename(__a ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , ) UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() ) return array_cast(__a , self.pa_type )
78
1
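The Audio feature above decodes a sample by reading bytes with soundfile, transposing to channels-first, folding to mono with librosa, and resampling when a target rate is set. A minimal sketch of that decode path on a raw byte buffer; it mirrors the logic only, it is not the datasets API itself:

# Minimal sketch of the decode path in the Audio feature above: soundfile read,
# channels-first transpose, librosa mono fold and resample. This mirrors the
# feature's logic; it is not the `datasets` API.
from io import BytesIO

import librosa
import numpy as np
import soundfile as sf

def decode_audio(wav_bytes: bytes, target_sr: int = 16_000) -> dict:
    array, sampling_rate = sf.read(BytesIO(wav_bytes))   # (frames,) or (frames, channels)
    array = librosa.to_mono(array.T)                     # channels-first, as the feature does
    if sampling_rate != target_sr:
        array = librosa.resample(array, orig_sr=sampling_rate, target_sr=target_sr)
        sampling_rate = target_sr
    return {"array": array, "sampling_rate": sampling_rate}

# Round-trip one second of silence through an in-memory WAV buffer.
buf = BytesIO()
sf.write(buf, np.zeros(22_050, dtype=np.float32), 22_050, format="wav")
assert decode_audio(buf.getvalue())["sampling_rate"] == 16_000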
'''simple docstring'''
import math


def lowerCAmelCase_ ( snake_case_ : int ) -> bool:
    '''simple docstring'''
    assert isinstance(snake_case_ , snake_case_ ) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    UpperCAmelCase_ = range(3 , int(math.sqrt(snake_case_ ) + 1 ) , 2 )
    return not any(not number % i for i in odd_numbers )


def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Optional[int]=1 , **snake_case_ : Union[str, Any] ) -> Tuple:
    '''simple docstring'''
    UpperCAmelCase_ = factor * value
    UpperCAmelCase_ = value

    while not is_prime(snake_case_ ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1 , **snake_case_ )
    return value
78
'''simple docstring''' import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Optional[int] ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg" UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ).convert("RGB" ) UpperCAmelCase_ = transforms.Compose( [ transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ), ] ) UpperCAmelCase_ = transform(snake_case_ ).unsqueeze(0 ).to(snake_case_ ) return image def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' if "visual_encoder" in key: UpperCAmelCase_ = re.sub("visual_encoder*" , "vision_model.encoder" , snake_case_ ) if "blocks" in key: UpperCAmelCase_ = re.sub(R"blocks" , "layers" , snake_case_ ) if "attn" in key: UpperCAmelCase_ = re.sub(R"attn" , "self_attn" , snake_case_ ) if "norm1" in key: UpperCAmelCase_ = re.sub(R"norm1" , "layer_norm1" , snake_case_ ) if "norm2" in key: UpperCAmelCase_ = re.sub(R"norm2" , "layer_norm2" , snake_case_ ) if "encoder.norm" in key: UpperCAmelCase_ = re.sub(R"encoder.norm" , "post_layernorm" , snake_case_ ) if "encoder.patch_embed.proj" in key: UpperCAmelCase_ = re.sub(R"encoder.patch_embed.proj" , "embeddings.patch_embedding" , snake_case_ ) if "encoder.pos_embed" in key: UpperCAmelCase_ = re.sub(R"encoder.pos_embed" , "embeddings.position_embedding" , snake_case_ ) if "encoder.cls_token" in key: UpperCAmelCase_ = re.sub(R"encoder.cls_token" , "embeddings.class_embedding" , snake_case_ ) if "self_attn" in key: UpperCAmelCase_ = re.sub(R"self_attn.proj" , "self_attn.projection" , snake_case_ ) return key @torch.no_grad() def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : Any=None ) -> Union[str, Any]: '''simple docstring''' if config_path is not None: UpperCAmelCase_ = BlipConfig.from_pretrained(snake_case_ ) else: UpperCAmelCase_ = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} ) UpperCAmelCase_ = BlipForConditionalGeneration(snake_case_ ).eval() UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth" UpperCAmelCase_ = blip_decoder(pretrained=snake_case_ , image_size=3_84 , vit="base" ) UpperCAmelCase_ = pt_model.eval() UpperCAmelCase_ = pt_model.state_dict() for key in modified_state_dict.copy(): UpperCAmelCase_ = modified_state_dict.pop(snake_case_ ) UpperCAmelCase_ = rename_key(snake_case_ ) UpperCAmelCase_ = value hf_model.load_state_dict(snake_case_ ) UpperCAmelCase_ = 3_84 UpperCAmelCase_ = load_demo_image(image_size=snake_case_ , device="cpu" ) UpperCAmelCase_ = BertTokenizer.from_pretrained("bert-base-uncased" ) UpperCAmelCase_ = tokenizer(["a picture of"] ).input_ids UpperCAmelCase_ = hf_model.generate(snake_case_ , snake_case_ ) assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 
20_14, 38_99, 1_02] UpperCAmelCase_ = hf_model.generate(snake_case_ ) assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02] if pytorch_dump_folder_path is not None: hf_model.save_pretrained(snake_case_ ) # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth' UpperCAmelCase_ = ( "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth" ) UpperCAmelCase_ = blip_vqa(pretrained=snake_case_ , image_size=snake_case_ , vit="base" ) vqa_model.eval() UpperCAmelCase_ = vqa_model.state_dict() for key in modified_state_dict.copy(): UpperCAmelCase_ = modified_state_dict.pop(snake_case_ ) UpperCAmelCase_ = rename_key(snake_case_ ) UpperCAmelCase_ = value UpperCAmelCase_ = BlipForQuestionAnswering(snake_case_ ) hf_vqa_model.load_state_dict(snake_case_ ) UpperCAmelCase_ = ["How many dogs are in this image?"] UpperCAmelCase_ = tokenizer(snake_case_ , return_tensors="pt" ).input_ids UpperCAmelCase_ = hf_vqa_model.generate(snake_case_ , snake_case_ ) print(tokenizer.decode(answer[0] ) ) assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]" if pytorch_dump_folder_path is not None: hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa" ) UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth" UpperCAmelCase_ = blip_itm(pretrained=snake_case_ , image_size=snake_case_ , vit="base" ) itm_model.eval() UpperCAmelCase_ = itm_model.state_dict() for key in modified_state_dict.copy(): UpperCAmelCase_ = modified_state_dict.pop(snake_case_ ) UpperCAmelCase_ = rename_key(snake_case_ ) UpperCAmelCase_ = value UpperCAmelCase_ = BlipForImageTextRetrieval(snake_case_ ) UpperCAmelCase_ = ["A picture of a woman with a dog sitting in a beach"] UpperCAmelCase_ = tokenizer( snake_case_ , return_tensors="pt" , padding="max_length" , truncation=snake_case_ , max_length=35 , ).input_ids hf_itm_model.load_state_dict(snake_case_ ) hf_itm_model.eval() UpperCAmelCase_ = hf_itm_model(snake_case_ , snake_case_ , use_itm_head=snake_case_ ) UpperCAmelCase_ = hf_itm_model(snake_case_ , snake_case_ , use_itm_head=snake_case_ ) assert out[0].item() == 0.2110_6874_9427_7954 assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127 if pytorch_dump_folder_path is not None: hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_: Optional[Any] =argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') SCREAMING_SNAKE_CASE_: int =parser.parse_args() convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
78
1
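The BLIP conversion script above ports each checkpoint by popping every state-dict key, rewriting it through a chain of re.sub rules, and reinserting the tensor under the new name. A tiny self-contained sketch of that pattern; the two rules are adapted from the script's rename_key and the dictionary entry is a toy placeholder, not a real BLIP tensor:

# The state-dict porting pattern used by the converter above: pop, regex-rename,
# reinsert. Rules adapted from the script's rename_key; the entry is a placeholder.
import re

def rename_key(key: str) -> str:
    key = re.sub(r"^visual_encoder", "vision_model.encoder", key)
    key = re.sub(r"\bblocks\b", "layers", key)
    return key

state_dict = {"visual_encoder.blocks.0.attn.qkv.weight": "tensor-placeholder"}
for key in list(state_dict):
    state_dict[rename_key(key)] = state_dict.pop(key)

assert "vision_model.encoder.layers.0.attn.qkv.weight" in state_dict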
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


SCREAMING_SNAKE_CASE_: Tuple ={
    'configuration_encodec': [
        'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'EncodecConfig',
    ],
    'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE_: Optional[Any] =[
        'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
        'EncodecModel',
        'EncodecPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    SCREAMING_SNAKE_CASE_: List[str] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
78
'''simple docstring''' import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Union[str, Any]=0.999 , snake_case_ : Tuple="cosine" , ) -> Optional[Any]: '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(snake_case_ : Optional[int] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(snake_case_ : Optional[Any] ): return math.exp(t * -12.0 ) else: raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) UpperCAmelCase_ = [] for i in range(snake_case_ ): UpperCAmelCase_ = i / num_diffusion_timesteps UpperCAmelCase_ = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(snake_case_ ) / alpha_bar_fn(snake_case_ ) , snake_case_ ) ) return torch.tensor(snake_case_ , dtype=torch.floataa ) class __A ( UpperCamelCase__ , UpperCamelCase__ ): a__ : Tuple = [e.name for e in KarrasDiffusionSchedulers] a__ : Optional[Any] = 2 @register_to_config def __init__(self : Union[str, Any] , __a : int = 1000 , __a : float = 0.0_00_85 , __a : float = 0.0_12 , __a : str = "linear" , __a : Optional[Union[np.ndarray, List[float]]] = None , __a : str = "epsilon" , __a : Optional[bool] = False , __a : Optional[bool] = False , __a : float = 1.0 , __a : str = "linspace" , __a : int = 0 , ): if trained_betas is not None: UpperCAmelCase_ = torch.tensor(__a , dtype=torch.floataa ) elif beta_schedule == "linear": UpperCAmelCase_ = torch.linspace(__a , __a , __a , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. UpperCAmelCase_ = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , __a , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule UpperCAmelCase_ = betas_for_alpha_bar(__a , alpha_transform_type="cosine" ) elif beta_schedule == "exp": UpperCAmelCase_ = betas_for_alpha_bar(__a , alpha_transform_type="exp" ) else: raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" ) UpperCAmelCase_ = 1.0 - self.betas UpperCAmelCase_ = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(__a , __a , __a ) UpperCAmelCase_ = use_karras_sigmas def _lowercase (self : Optional[Any] , __a : Union[str, Any] , __a : Tuple=None ): if schedule_timesteps is None: UpperCAmelCase_ = self.timesteps UpperCAmelCase_ = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: UpperCAmelCase_ = 1 if len(__a ) > 1 else 0 else: UpperCAmelCase_ = timestep.cpu().item() if torch.is_tensor(__a ) else timestep UpperCAmelCase_ = self._index_counter[timestep_int] return indices[pos].item() @property def _lowercase (self : List[Any] ): # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def _lowercase (self : Optional[Any] , __a : torch.FloatTensor , __a : Union[float, torch.FloatTensor] , ): UpperCAmelCase_ = self.index_for_timestep(__a ) UpperCAmelCase_ = self.sigmas[step_index] UpperCAmelCase_ = sample / ((sigma**2 + 1) ** 0.5) return sample def _lowercase (self : Any , __a : int , __a : Union[str, torch.device] = None , __a : Optional[int] = None , ): UpperCAmelCase_ = num_inference_steps UpperCAmelCase_ = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": UpperCAmelCase_ = np.linspace(0 , num_train_timesteps - 1 , __a , dtype=__a )[::-1].copy() elif self.config.timestep_spacing == "leading": UpperCAmelCase_ = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 UpperCAmelCase_ = (np.arange(0 , __a ) * step_ratio).round()[::-1].copy().astype(__a ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": UpperCAmelCase_ = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 UpperCAmelCase_ = (np.arange(__a , 0 , -step_ratio )).round().copy().astype(__a ) timesteps -= 1 else: raise ValueError( f"""{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" ) UpperCAmelCase_ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) UpperCAmelCase_ = np.log(__a ) UpperCAmelCase_ = np.interp(__a , np.arange(0 , len(__a ) ) , __a ) if self.config.use_karras_sigmas: UpperCAmelCase_ = self._convert_to_karras(in_sigmas=__a , num_inference_steps=self.num_inference_steps ) UpperCAmelCase_ = np.array([self._sigma_to_t(__a , __a ) for sigma in sigmas] ) UpperCAmelCase_ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) UpperCAmelCase_ = torch.from_numpy(__a ).to(device=__a ) UpperCAmelCase_ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] ) UpperCAmelCase_ = torch.from_numpy(__a ) UpperCAmelCase_ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] ) if str(__a ).startswith("mps" ): # mps does not support float64 UpperCAmelCase_ = timesteps.to(__a , dtype=torch.floataa ) else: UpperCAmelCase_ = timesteps.to(device=__a ) # empty dt and derivative UpperCAmelCase_ = None UpperCAmelCase_ = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter UpperCAmelCase_ = defaultdict(__a ) def _lowercase (self : int , __a : Optional[Any] , __a : List[str] ): # get log sigma UpperCAmelCase_ = np.log(__a ) # get distribution UpperCAmelCase_ = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range UpperCAmelCase_ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 ) UpperCAmelCase_ = low_idx + 1 UpperCAmelCase_ = log_sigmas[low_idx] UpperCAmelCase_ = log_sigmas[high_idx] # interpolate sigmas UpperCAmelCase_ = (low - log_sigma) / (low - high) UpperCAmelCase_ = np.clip(__a , 0 , 1 ) # transform interpolation to time range UpperCAmelCase_ = (1 - w) * low_idx + w * high_idx UpperCAmelCase_ = t.reshape(sigma.shape ) return t def _lowercase (self : Dict , __a : torch.FloatTensor , __a : Optional[int] ): UpperCAmelCase_ = in_sigmas[-1].item() UpperCAmelCase_ = in_sigmas[0].item() UpperCAmelCase_ = 7.0 # 7.0 is the value used in the paper UpperCAmelCase_ = np.linspace(0 , 1 , __a ) UpperCAmelCase_ = sigma_min ** (1 / rho) UpperCAmelCase_ = sigma_max ** (1 / rho) UpperCAmelCase_ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def _lowercase (self : List[str] ): return self.dt is None def _lowercase (self : List[Any] , __a : Union[torch.FloatTensor, np.ndarray] , __a : Union[float, torch.FloatTensor] , __a : Union[torch.FloatTensor, np.ndarray] , __a : bool = True , ): UpperCAmelCase_ = self.index_for_timestep(__a ) # advance index counter by 1 UpperCAmelCase_ = timestep.cpu().item() if torch.is_tensor(__a ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: UpperCAmelCase_ = self.sigmas[step_index] UpperCAmelCase_ = self.sigmas[step_index + 1] else: # 2nd order / Heun's method UpperCAmelCase_ = self.sigmas[step_index - 1] UpperCAmelCase_ = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API UpperCAmelCase_ = 0 UpperCAmelCase_ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": UpperCAmelCase_ = sigma_hat if self.state_in_first_order else sigma_next UpperCAmelCase_ = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": UpperCAmelCase_ = sigma_hat if self.state_in_first_order else sigma_next UpperCAmelCase_ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": UpperCAmelCase_ = model_output else: raise ValueError( f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" ) if self.config.clip_sample: UpperCAmelCase_ = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order UpperCAmelCase_ = (sample - pred_original_sample) / sigma_hat # 3. delta timestep UpperCAmelCase_ = sigma_next - sigma_hat # store for 2nd order step UpperCAmelCase_ = derivative UpperCAmelCase_ = dt UpperCAmelCase_ = sample else: # 2. 2nd order / Heun's method UpperCAmelCase_ = (sample - pred_original_sample) / sigma_next UpperCAmelCase_ = (self.prev_derivative + derivative) / 2 # 3. take prev timestep & sample UpperCAmelCase_ = self.dt UpperCAmelCase_ = self.sample # free dt and derivative # Note, this puts the scheduler in "first order mode" UpperCAmelCase_ = None UpperCAmelCase_ = None UpperCAmelCase_ = None UpperCAmelCase_ = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=__a ) def _lowercase (self : Any , __a : torch.FloatTensor , __a : torch.FloatTensor , __a : torch.FloatTensor , ): # Make sure sigmas and timesteps have the same device and dtype as original_samples UpperCAmelCase_ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(__a ): # mps does not support float64 UpperCAmelCase_ = self.timesteps.to(original_samples.device , dtype=torch.floataa ) UpperCAmelCase_ = timesteps.to(original_samples.device , dtype=torch.floataa ) else: UpperCAmelCase_ = self.timesteps.to(original_samples.device ) UpperCAmelCase_ = timesteps.to(original_samples.device ) UpperCAmelCase_ = [self.index_for_timestep(__a , __a ) for t in timesteps] UpperCAmelCase_ = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): UpperCAmelCase_ = sigma.unsqueeze(-1 ) UpperCAmelCase_ = original_samples + noise * sigma return noisy_samples def __len__(self : str ): return self.config.num_train_timesteps
78
1
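The sample above tracks diffusers' HeunDiscreteScheduler. A minimal denoising loop against that public API might look as follows; the zero tensor stands in for a real UNet prediction, so this is a sketch of the call pattern, not a working pipeline.

import torch
from diffusers import HeunDiscreteScheduler

scheduler = HeunDiscreteScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(num_inference_steps=25)

# Start from pure noise at the scheduler's initial sigma.
sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
for t in scheduler.timesteps:  # each timestep appears twice: first- and second-order pass
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for a UNet forward pass
    sample = scheduler.step(noise_pred, t, sample).prev_sample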
'''simple docstring'''
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}

speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    '''simple docstring'''
    if unit_from not in speed_chart or unit_to not in speed_chart_inverse:
        msg = (
            f"Incorrect 'unit_from' or 'unit_to' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
78
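A quick check of the converter above, assuming the cleaned-up names (convert_speed, speed_chart):

# Both directions; the converter rounds to three decimal places.
assert convert_speed(100, "km/h", "m/s") == 27.778
assert convert_speed(27.778, "m/s", "km/h") == 100.001

# Unknown units raise ValueError listing the valid ones.
try:
    convert_speed(1, "furlong/fortnight", "km/h")
except ValueError as err:
    print(err)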
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # PIL-based preprocessing needs the vision extra.
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
78
1
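PipelineTool subclasses are directly callable; a sketch of using the captioner above on a local file (the path is illustrative, and the checkpoint is downloaded on first use):

from PIL import Image

tool = ImageCaptioningTool()
caption = tool(Image.open("photo.jpg"))  # __call__ chains encode -> forward -> decode
print(caption)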
'''simple docstring'''
import unittest

import torch
from torch import nn

from diffusers.models.activations import get_activation


class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
78
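The factory under test can be exercised directly; get_activation maps a string name to a torch nn.Module:

import torch
from diffusers.models.activations import get_activation

act = get_activation("gelu")
print(act(torch.tensor([-100.0, -1.0, 0.0, 20.0])))
# roughly [0.0, -0.159, 0.0, 20.0], matching the assertions in the test above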
'''simple docstring''' import logging import math from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from .tensor_utils import tensor_tree_map, tree_map def lowerCAmelCase_ ( snake_case_ : Union[dict, list, tuple, torch.Tensor] ) -> List[Tuple[int, ...]]: '''simple docstring''' UpperCAmelCase_ = [] if isinstance(snake_case_ , snake_case_ ): for v in tree.values(): shapes.extend(_fetch_dims(snake_case_ ) ) elif isinstance(snake_case_ , (list, tuple) ): for t in tree: shapes.extend(_fetch_dims(snake_case_ ) ) elif isinstance(snake_case_ , torch.Tensor ): shapes.append(tree.shape ) else: raise ValueError("Not supported" ) return shapes @torch.jit.ignore def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Tuple[int, ...] ) -> Tuple[int, ...]: '''simple docstring''' UpperCAmelCase_ = [] for d in reversed(snake_case_ ): idx.append(flat_idx % d ) UpperCAmelCase_ = flat_idx // d return tuple(reversed(snake_case_ ) ) @torch.jit.ignore def lowerCAmelCase_ ( snake_case_ : Sequence[int] , snake_case_ : Sequence[int] , snake_case_ : Sequence[int] , snake_case_ : Optional[Sequence[bool]] = None , snake_case_ : Optional[Sequence[bool]] = None , ) -> List[Tuple[slice, ...]]: '''simple docstring''' def reduce_edge_list(snake_case_ : List[bool] ) -> None: UpperCAmelCase_ = True for i in range(len(snake_case_ ) ): UpperCAmelCase_ = -1 * (i + 1) l[reversed_idx] &= tally UpperCAmelCase_ = l[reversed_idx] if start_edges is None: UpperCAmelCase_ = [s == 0 for s in start] reduce_edge_list(snake_case_ ) if end_edges is None: UpperCAmelCase_ = [e == (d - 1) for e, d in zip(snake_case_ , snake_case_ )] reduce_edge_list(snake_case_ ) # Base cases. Either start/end are empty and we're done, or the final, # one-dimensional tensor can be simply sliced if len(snake_case_ ) == 0: return [()] elif len(snake_case_ ) == 1: return [(slice(start[0] , end[0] + 1 ),)] UpperCAmelCase_ = [] UpperCAmelCase_ = [] # Dimensions common to start and end can be selected directly for s, e in zip(snake_case_ , snake_case_ ): if s == e: path_list.append(slice(snake_case_ , s + 1 ) ) else: break UpperCAmelCase_ = tuple(snake_case_ ) UpperCAmelCase_ = len(snake_case_ ) # start == end, and we're done if divergence_idx == len(snake_case_ ): return [path] def upper() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None UpperCAmelCase_ = start[divergence_idx] return tuple( path + (slice(snake_case_ , sdi + 1 ),) + s for s in _get_minimal_slice_set( start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) ) def lower() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None UpperCAmelCase_ = end[divergence_idx] return tuple( path + (slice(snake_case_ , edi + 1 ),) + s for s in _get_minimal_slice_set( [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) ) # If both start and end are at the edges of the subtree rooted at # divergence_idx, we can just select the whole subtree at once if start_edges[divergence_idx] and end_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) ) # If just start is at the edge, we can grab almost all 
of the subtree, # treating only the ragged bottom edge as an edge case elif start_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) ) slices.extend(lower() ) # Analogous to the previous case, but the top is ragged this time elif end_edges[divergence_idx]: slices.extend(upper() ) slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) ) # If both sides of the range are ragged, we need to handle both sides # separately. If there's contiguous meat in between them, we can index it # in one big chunk else: slices.extend(upper() ) UpperCAmelCase_ = end[divergence_idx] - start[divergence_idx] if middle_ground > 1: slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) ) slices.extend(lower() ) return slices @torch.jit.ignore def lowerCAmelCase_ ( snake_case_ : torch.Tensor , snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> torch.Tensor: '''simple docstring''' UpperCAmelCase_ = t.shape[:no_batch_dims] UpperCAmelCase_ = list(_flat_idx_to_idx(snake_case_ , snake_case_ ) ) # _get_minimal_slice_set is inclusive UpperCAmelCase_ = list(_flat_idx_to_idx(flat_end - 1 , snake_case_ ) ) # Get an ordered list of slices to perform UpperCAmelCase_ = _get_minimal_slice_set( snake_case_ , snake_case_ , snake_case_ , ) UpperCAmelCase_ = [t[s] for s in slices] return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] ) def lowerCAmelCase_ ( snake_case_ : Callable , snake_case_ : Dict[str, Any] , snake_case_ : int , snake_case_ : int , snake_case_ : bool = False , snake_case_ : Any = None , snake_case_ : bool = False , ) -> Any: '''simple docstring''' if not (len(snake_case_ ) > 0): raise ValueError("Must provide at least one input" ) UpperCAmelCase_ = [shape[:no_batch_dims] for shape in _fetch_dims(snake_case_ )] UpperCAmelCase_ = tuple([max(snake_case_ ) for s in zip(*snake_case_ )] ) def _prep_inputs(snake_case_ : torch.Tensor ) -> torch.Tensor: if not low_mem: if not sum(t.shape[:no_batch_dims] ) == no_batch_dims: UpperCAmelCase_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) UpperCAmelCase_ = t.reshape(-1 , *t.shape[no_batch_dims:] ) else: UpperCAmelCase_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) return t UpperCAmelCase_ = tensor_tree_map(_prep_inputs , snake_case_ ) UpperCAmelCase_ = None if _out is not None: UpperCAmelCase_ = tensor_tree_map(lambda snake_case_ : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out ) UpperCAmelCase_ = 1 for d in orig_batch_dims: flat_batch_dim *= d UpperCAmelCase_ = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) def _select_chunk(snake_case_ : torch.Tensor ) -> torch.Tensor: return t[i : i + chunk_size] if t.shape[0] != 1 else t UpperCAmelCase_ = 0 UpperCAmelCase_ = prepped_outputs for _ in range(snake_case_ ): # Chunk the input if not low_mem: UpperCAmelCase_ = _select_chunk else: UpperCAmelCase_ = partial( _chunk_slice , flat_start=snake_case_ , flat_end=min(snake_case_ , i + chunk_size ) , no_batch_dims=len(snake_case_ ) , ) UpperCAmelCase_ = tensor_tree_map(snake_case_ , snake_case_ ) # Run the layer on the chunk UpperCAmelCase_ = layer(**snake_case_ ) # Allocate space for the output if out is None: UpperCAmelCase_ = tensor_tree_map(lambda snake_case_ : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , snake_case_ ) # Put the chunk in its pre-allocated space if isinstance(snake_case_ , snake_case_ ): def assign(snake_case_ : dict , snake_case_ : dict ) -> None: for k, v in da.items(): if 
isinstance(snake_case_ , snake_case_ ): assign(snake_case_ , da[k] ) else: if _add_into_out: v[i : i + chunk_size] += da[k] else: UpperCAmelCase_ = da[k] assign(snake_case_ , snake_case_ ) elif isinstance(snake_case_ , snake_case_ ): for xa, xa in zip(snake_case_ , snake_case_ ): if _add_into_out: xa[i : i + chunk_size] += xa else: UpperCAmelCase_ = xa elif isinstance(snake_case_ , torch.Tensor ): if _add_into_out: out[i : i + chunk_size] += output_chunk else: UpperCAmelCase_ = output_chunk else: raise ValueError("Not supported" ) i += chunk_size UpperCAmelCase_ = tensor_tree_map(lambda snake_case_ : t.view(orig_batch_dims + t.shape[1:] ) , snake_case_ ) return out class __A : def __init__(self : Dict , __a : int = 512 , ): UpperCAmelCase_ = max_chunk_size UpperCAmelCase_ = None UpperCAmelCase_ = None def _lowercase (self : List[Any] , __a : Callable , __a : tuple , __a : int ): logging.info("Tuning chunk size..." ) if min_chunk_size >= self.max_chunk_size: return min_chunk_size UpperCAmelCase_ = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )] UpperCAmelCase_ = [c for c in candidates if c > min_chunk_size] UpperCAmelCase_ = [min_chunk_size] + candidates candidates[-1] += 4 def test_chunk_size(__a : int ) -> bool: try: with torch.no_grad(): fn(*__a , chunk_size=__a ) return True except RuntimeError: return False UpperCAmelCase_ = 0 UpperCAmelCase_ = len(__a ) - 1 while i > min_viable_chunk_size_index: UpperCAmelCase_ = test_chunk_size(candidates[i] ) if not viable: UpperCAmelCase_ = (min_viable_chunk_size_index + i) // 2 else: UpperCAmelCase_ = i UpperCAmelCase_ = (i + len(__a ) - 1) // 2 return candidates[min_viable_chunk_size_index] def _lowercase (self : int , __a : Iterable , __a : Iterable ): UpperCAmelCase_ = True for aa, aa in zip(__a , __a ): assert type(__a ) == type(__a ) if isinstance(__a , (list, tuple) ): consistent &= self._compare_arg_caches(__a , __a ) elif isinstance(__a , __a ): UpperCAmelCase_ = [v for _, v in sorted(aa.items() , key=lambda __a : x[0] )] UpperCAmelCase_ = [v for _, v in sorted(aa.items() , key=lambda __a : x[0] )] consistent &= self._compare_arg_caches(__a , __a ) else: consistent &= aa == aa return consistent def _lowercase (self : List[str] , __a : Callable , __a : tuple , __a : int , ): UpperCAmelCase_ = True UpperCAmelCase_ = tree_map(lambda __a : a.shape if isinstance(__a , torch.Tensor ) else a , __a , __a ) if self.cached_arg_data is not None: # If args have changed shape/value, we need to re-tune assert len(self.cached_arg_data ) == len(__a ) UpperCAmelCase_ = self._compare_arg_caches(self.cached_arg_data , __a ) else: # Otherwise, we can reuse the precomputed value UpperCAmelCase_ = False if not consistent: UpperCAmelCase_ = self._determine_favorable_chunk_size( __a , __a , __a , ) UpperCAmelCase_ = arg_data assert self.cached_chunk_size is not None return self.cached_chunk_size
78
1
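In the OpenFold-derived original these utilities are exposed as chunk_layer (with helpers _fetch_dims, _chunk_slice, etc., the names the bodies above still call). Assuming those names, a minimal use of the chunked-application entry point:

import torch

def heavy_layer(x: torch.Tensor) -> torch.Tensor:
    # Stand-in for an attention block that is expensive over the batch dims.
    return x * 2

inputs = {"x": torch.randn(4, 8, 16)}
# Flattens the two leading batch dims (4 * 8 = 32), runs the layer in chunks
# of 4 rows at a time, then restores the original batch shape.
out = chunk_layer(heavy_layer, inputs, chunk_size=4, no_batch_dims=2)
assert out.shape == (4, 8, 16)
assert torch.allclose(out, inputs["x"] * 2)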
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
78
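The _import_structure indirection defers the torch-heavy imports until a symbol is first touched; from the user side the lazy module is invisible:

from transformers import GPTBigCodeConfig, GPTBigCodeForCausalLM  # resolved lazily

config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128)
model = GPTBigCodeForCausalLM(config)  # tiny randomly initialized model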
'''simple docstring''' import copy import re class __A : a__ : Optional[int] = """hp""" a__ : Optional[Any] = {} a__ : List[Any] = None @classmethod def _lowercase (cls : Optional[int] , __a : str , __a : Tuple ): UpperCAmelCase_ = prefix UpperCAmelCase_ = defaults cls.build_naming_info() @staticmethod def _lowercase (__a : List[Any] , __a : List[str] ): if len(__a ) == 0: return "" UpperCAmelCase_ = None if any(char.isdigit() for char in word ): raise Exception(f"""Parameters should not contain numbers: '{word}' contains a number""" ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(__a ) + 1 ): UpperCAmelCase_ = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: UpperCAmelCase_ = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(__a : Union[str, Any] ): UpperCAmelCase_ = "" while integer != 0: UpperCAmelCase_ = chr(ord("A" ) + integer % 10 ) + s integer //= 10 return s UpperCAmelCase_ = 0 while True: UpperCAmelCase_ = word + "#" + int_to_alphabetic(__a ) if sword in info["reverse_short_word"]: continue else: UpperCAmelCase_ = sword break UpperCAmelCase_ = short_word UpperCAmelCase_ = word return short_word @staticmethod def _lowercase (__a : List[str] , __a : Union[str, Any] ): UpperCAmelCase_ = param_name.split("_" ) UpperCAmelCase_ = [TrialShortNamer.shortname_for_word(__a , __a ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name UpperCAmelCase_ = ["", "_"] for separator in separators: UpperCAmelCase_ = separator.join(__a ) if shortname not in info["reverse_short_param"]: UpperCAmelCase_ = shortname UpperCAmelCase_ = param_name return shortname return param_name @staticmethod def _lowercase (__a : int , __a : Union[str, Any] ): UpperCAmelCase_ = TrialShortNamer.shortname_for_key(__a , __a ) UpperCAmelCase_ = short_name UpperCAmelCase_ = param_name @classmethod def _lowercase (cls : Any ): if cls.NAMING_INFO is not None: return UpperCAmelCase_ = { "short_word": {}, "reverse_short_word": {}, "short_param": {}, "reverse_short_param": {}, } UpperCAmelCase_ = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(__a , __a ) UpperCAmelCase_ = info @classmethod def _lowercase (cls : int , __a : Optional[int] ): cls.build_naming_info() assert cls.PREFIX is not None UpperCAmelCase_ = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(f"""You should provide a default value for the param name {k} with value {v}""" ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue UpperCAmelCase_ = cls.NAMING_INFO["short_param"][k] if isinstance(__a , __a ): UpperCAmelCase_ = 1 if v else 0 UpperCAmelCase_ = "" if isinstance(__a , (int, float) ) else "-" UpperCAmelCase_ = f"""{key}{sep}{v}""" name.append(__a ) return "_".join(__a ) @classmethod def _lowercase (cls : Dict , __a : Dict ): UpperCAmelCase_ = repr[len(cls.PREFIX ) + 1 :] if repr == "": UpperCAmelCase_ = [] else: UpperCAmelCase_ = repr.split("_" ) UpperCAmelCase_ = {} for value in values: if "-" in value: UpperCAmelCase_ , UpperCAmelCase_ = value.split("-" ) else: UpperCAmelCase_ = re.sub("[0-9.]" , "" , __a ) UpperCAmelCase_ = float(re.sub("[^0-9.]" , "" , __a ) ) UpperCAmelCase_ = cls.NAMING_INFO["reverse_short_param"][p_k] UpperCAmelCase_ = p_v for k in cls.DEFAULTS: if k not in parameters: UpperCAmelCase_ = cls.DEFAULTS[k] return parameters
78
1
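The class above matches transformers' TrialShortNamer; assuming that name and the original classmethod names shortname and parse_repr, the intended use is to subclass with a PREFIX and DEFAULTS, after which only non-default hyperparameters are encoded (exact abbreviations depend on collision handling):

class RunNamer(TrialShortNamer):
    PREFIX = "run"
    DEFAULTS = {"learning_rate": 1e-5, "batch_size": 8}

name = RunNamer.shortname({"learning_rate": 0.001, "batch_size": 8})
print(name)  # e.g. "run_lr0.001"; batch_size is at its default and is dropped
params = RunNamer.parse_repr(name)  # round-trips back to the full parameter dict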
'''simple docstring''' import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class __A : def __init__(self : str , __a : Optional[int] , __a : Optional[int]=13 , __a : Optional[Any]=7 , __a : str=True , __a : str=True , __a : Tuple=False , __a : List[str]=True , __a : Optional[Any]=99 , __a : Any=32 , __a : Optional[Any]=5 , __a : List[Any]=4 , __a : List[str]=37 , __a : List[Any]="gelu" , __a : Optional[int]=0.1 , __a : List[Any]=0.1 , __a : List[str]=512 , __a : Optional[Any]=16 , __a : List[Any]=2 , __a : Tuple=0.02 , __a : str=3 , __a : Optional[int]=4 , __a : Dict=None , ): UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = seq_length UpperCAmelCase_ = is_training UpperCAmelCase_ = use_input_mask UpperCAmelCase_ = use_token_type_ids UpperCAmelCase_ = use_labels UpperCAmelCase_ = vocab_size UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_act UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = max_position_embeddings UpperCAmelCase_ = type_vocab_size UpperCAmelCase_ = type_sequence_label_size UpperCAmelCase_ = initializer_range UpperCAmelCase_ = num_labels UpperCAmelCase_ = num_choices UpperCAmelCase_ = scope def _lowercase (self : int ): UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ = None if self.use_input_mask: UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ = None if self.use_token_type_ids: UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ = None UpperCAmelCase_ = None UpperCAmelCase_ = None if self.use_labels: UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowercase (self : int ): return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , ) def _lowercase (self : Tuple , __a : Dict , __a : Any , __a : List[str] , __a : Dict , __a : Any , __a : Any , __a : List[Any] ): UpperCAmelCase_ = BioGptModel(config=__a ) model.to(__a ) model.eval() UpperCAmelCase_ = 
model(__a , attention_mask=__a ) UpperCAmelCase_ = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase (self : List[Any] , __a : Any , __a : Union[str, Any] , __a : Optional[Any] , __a : int , __a : Optional[Any] , __a : Any , __a : List[str] , __a : Optional[Any] , __a : List[Any] , ): UpperCAmelCase_ = BioGptForCausalLM(config=__a ) model.to(__a ) model.eval() UpperCAmelCase_ = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase (self : Optional[int] , __a : Tuple , __a : List[Any] , __a : str , __a : Dict , __a : Tuple , *__a : List[Any] ): UpperCAmelCase_ = BioGptModel(config=__a ) model.to(__a ) model.eval() # create attention mask UpperCAmelCase_ = torch.ones(input_ids.shape , dtype=torch.long , device=__a ) UpperCAmelCase_ = self.seq_length // 2 UpperCAmelCase_ = 0 # first forward pass UpperCAmelCase_ , UpperCAmelCase_ = model(__a , attention_mask=__a ).to_tuple() # create hypothetical next token and extent to next_input_ids UpperCAmelCase_ = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids UpperCAmelCase_ = ids_tensor((1,) , __a ).item() + 1 UpperCAmelCase_ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) UpperCAmelCase_ = random_other_next_tokens # append to next input_ids and attn_mask UpperCAmelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase_ = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__a )] , dim=1 , ) # get two different outputs UpperCAmelCase_ = model(__a , attention_mask=__a )["last_hidden_state"] UpperCAmelCase_ = model(__a , past_key_values=__a , attention_mask=__a )["last_hidden_state"] # select random slice UpperCAmelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase_ = output_from_no_past[:, -1, random_slice_idx].detach() UpperCAmelCase_ = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__a , __a , atol=1E-3 ) ) def _lowercase (self : Dict , __a : Tuple , __a : List[str] , __a : Optional[Any] , __a : str , __a : Any , *__a : Optional[int] ): UpperCAmelCase_ = BioGptModel(config=__a ).to(__a ).eval() UpperCAmelCase_ = torch.ones(input_ids.shape , dtype=torch.long , device=__a ) # first forward pass UpperCAmelCase_ = model(__a , attention_mask=__a , use_cache=__a ) UpperCAmelCase_ , UpperCAmelCase_ = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and UpperCAmelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase_ = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) UpperCAmelCase_ = model(__a , attention_mask=__a )["last_hidden_state"] UpperCAmelCase_ = model(__a , attention_mask=__a , past_key_values=__a )[ "last_hidden_state" ] # select random slice UpperCAmelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase_ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice 
self.parent.assertTrue(torch.allclose(__a , __a , atol=1E-3 ) ) def _lowercase (self : Optional[Any] , __a : Optional[Any] , __a : Optional[int] , __a : Optional[int] , __a : Tuple , __a : Tuple , *__a : List[Any] , __a : Optional[int]=False ): UpperCAmelCase_ = BioGptForCausalLM(__a ) model.to(__a ) if gradient_checkpointing: model.gradient_checkpointing_enable() UpperCAmelCase_ = model(__a , labels=__a ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def _lowercase (self : Tuple , __a : int , *__a : int ): UpperCAmelCase_ = BioGptModel(__a ) UpperCAmelCase_ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_01 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def _lowercase (self : Any , __a : str , __a : List[str] , __a : Tuple , __a : Any , __a : List[Any] , *__a : Tuple ): UpperCAmelCase_ = self.num_labels UpperCAmelCase_ = BioGptForTokenClassification(__a ) model.to(__a ) model.eval() UpperCAmelCase_ = model(__a , attention_mask=__a , token_type_ids=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase (self : List[str] ): UpperCAmelCase_ = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) = config_and_inputs UpperCAmelCase_ = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class __A ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): a__ : List[str] = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) a__ : Optional[int] = (BioGptForCausalLM,) if is_torch_available() else () a__ : int = ( { """feature-extraction""": BioGptModel, """text-classification""": BioGptForSequenceClassification, """text-generation""": BioGptForCausalLM, """token-classification""": BioGptForTokenClassification, """zero-shot""": BioGptForSequenceClassification, } if is_torch_available() else {} ) a__ : Union[str, Any] = False def _lowercase (self : str ): UpperCAmelCase_ = BioGptModelTester(self ) UpperCAmelCase_ = ConfigTester(self , config_class=__a , hidden_size=37 ) def _lowercase (self : Tuple ): self.config_tester.run_common_tests() def _lowercase (self : List[str] ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def _lowercase (self : Any ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase_ = type self.model_tester.create_and_check_model(*__a ) def _lowercase (self : str ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__a ) def _lowercase (self : List[str] ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*__a , gradient_checkpointing=__a ) def _lowercase (self : Optional[int] ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__a ) def _lowercase (self : Dict ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*__a ) def _lowercase (self : int ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*__a ) @slow def _lowercase (self : int ): UpperCAmelCase_ = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) model.to(__a ) UpperCAmelCase_ = BioGptTokenizer.from_pretrained("microsoft/biogpt" ) UpperCAmelCase_ = "left" # Define PAD Token = EOS Token = 50256 UpperCAmelCase_ = tokenizer.eos_token UpperCAmelCase_ = model.config.eos_token_id # use different length sentences to test batching UpperCAmelCase_ = [ "Hello, my dog is a little", "Today, I", ] UpperCAmelCase_ = tokenizer(__a , return_tensors="pt" , padding=__a ) UpperCAmelCase_ = inputs["input_ids"].to(__a ) UpperCAmelCase_ = model.generate( input_ids=__a , attention_mask=inputs["attention_mask"].to(__a ) , ) UpperCAmelCase_ = tokenizer(sentences[0] , return_tensors="pt" ).input_ids.to(__a ) UpperCAmelCase_ = model.generate(input_ids=__a ) UpperCAmelCase_ = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item() UpperCAmelCase_ = tokenizer(sentences[1] , return_tensors="pt" ).input_ids.to(__a ) UpperCAmelCase_ = model.generate(input_ids=__a , max_length=model.config.max_length - num_paddings ) UpperCAmelCase_ = tokenizer.batch_decode(__a , skip_special_tokens=__a ) UpperCAmelCase_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__a ) UpperCAmelCase_ = tokenizer.decode(output_padded[0] , skip_special_tokens=__a ) UpperCAmelCase_ = [ "Hello, my dog is a little bit bigger than a little bit.", "Today, I have a good idea of how to use the information", ] self.assertListEqual(__a , __a ) self.assertListEqual(__a , [non_padded_sentence, padded_sentence] ) @slow def _lowercase (self : Dict ): for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ = BioGptModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ = 3 UpperCAmelCase_ = input_dict["input_ids"] UpperCAmelCase_ = input_ids.ne(1 ).to(__a ) UpperCAmelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCAmelCase_ = BioGptForSequenceClassification(__a ) model.to(__a ) model.eval() UpperCAmelCase_ = model(__a , attention_mask=__a , labels=__a ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ = 3 UpperCAmelCase_ = "multi_label_classification" UpperCAmelCase_ = input_dict["input_ids"] UpperCAmelCase_ = input_ids.ne(1 ).to(__a ) UpperCAmelCase_ = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) UpperCAmelCase_ = BioGptForSequenceClassification(__a ) model.to(__a ) model.eval() UpperCAmelCase_ = model(__a , attention_mask=__a , labels=__a ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class __A ( unittest.TestCase ): @slow def _lowercase (self : List[Any] ): UpperCAmelCase_ = 
BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) UpperCAmelCase_ = torch.tensor([[2, 4805, 9, 656, 21]] ) UpperCAmelCase_ = model(__a )[0] UpperCAmelCase_ = 42384 UpperCAmelCase_ = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , __a ) UpperCAmelCase_ = torch.tensor( [[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) ) @slow def _lowercase (self : List[str] ): UpperCAmelCase_ = BioGptTokenizer.from_pretrained("microsoft/biogpt" ) UpperCAmelCase_ = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) model.to(__a ) torch.manual_seed(0 ) UpperCAmelCase_ = tokenizer("COVID-19 is" , return_tensors="pt" ).to(__a ) UpperCAmelCase_ = model.generate( **__a , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=__a , ) UpperCAmelCase_ = tokenizer.decode(output_ids[0] , skip_special_tokens=__a ) UpperCAmelCase_ = ( "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the" " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and" " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK)," " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and" " more than 800,000 deaths." ) self.assertEqual(__a , __a )
78
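The integration tests above reduce to this kind of smoke test (requires network access to the Hub and the sacremoses dependency):

import torch
from transformers import BioGptForCausalLM, BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")

inputs = tokenizer("COVID-19 is", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.shape)  # torch.Size([1, seq_len, 42384]); 42384 is the BioGPT vocab size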
'''simple docstring'''
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        # Pad height and width up to the next multiple of `size`.
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
78
1
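Note that the padding rule always rounds up to the next multiple of pad_size, so an exact multiple still gains a full extra block. A quick check, using the class name restored above:

import numpy as np

processor = Swin2SRImageProcessor(pad_size=8)
image = np.random.randint(0, 256, (3, 30, 37), dtype=np.uint8)  # channels-first

pixel_values = processor(image, return_tensors="np").pixel_values
print(pixel_values.shape)  # (1, 3, 32, 40): 30 -> 32 and 37 -> 40, padded symmetrically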
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE_: Tuple =logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> int: '''simple docstring''' UpperCAmelCase_ = "huggingface/label-files" UpperCAmelCase_ = "imagenet-1k-id2label.json" UpperCAmelCase_ = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="dataset" ) , "r" ) ) UpperCAmelCase_ = {int(snake_case_ ): v for k, v in idalabel.items()} UpperCAmelCase_ = {v: k for k, v in idalabel.items()} UpperCAmelCase_ = "std_conv" if "bit" in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" UpperCAmelCase_ = BitConfig( conv_layer=snake_case_ , num_labels=10_00 , idalabel=snake_case_ , labelaid=snake_case_ , ) return config def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' if "stem.conv" in name: UpperCAmelCase_ = name.replace("stem.conv" , "bit.embedder.convolution" ) if "blocks" in name: UpperCAmelCase_ = name.replace("blocks" , "layers" ) if "head.fc" in name: UpperCAmelCase_ = name.replace("head.fc" , "classifier.1" ) if name.startswith("norm" ): UpperCAmelCase_ = "bit." + name if "bit" not in name and "classifier" not in name: UpperCAmelCase_ = "bit.encoder." 
+ name return name def lowerCAmelCase_ ( ) -> Dict: '''simple docstring''' UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : int=False ) -> List[Any]: '''simple docstring''' UpperCAmelCase_ = get_config(snake_case_ ) # load original model from timm UpperCAmelCase_ = create_model(snake_case_ , pretrained=snake_case_ ) timm_model.eval() # load state_dict of original model UpperCAmelCase_ = timm_model.state_dict() for key in state_dict.copy().keys(): UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val.squeeze() if "head" in key else val # load HuggingFace model UpperCAmelCase_ = BitForImageClassification(snake_case_ ) model.eval() model.load_state_dict(snake_case_ ) # create image processor UpperCAmelCase_ = create_transform(**resolve_data_config({} , model=snake_case_ ) ) UpperCAmelCase_ = transform.transforms UpperCAmelCase_ = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } UpperCAmelCase_ = BitImageProcessor( do_resize=snake_case_ , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=snake_case_ , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=snake_case_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) UpperCAmelCase_ = prepare_img() UpperCAmelCase_ = transform(snake_case_ ).unsqueeze(0 ) UpperCAmelCase_ = processor(snake_case_ , return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(snake_case_ , snake_case_ ) # verify logits with torch.no_grad(): UpperCAmelCase_ = model(snake_case_ ) UpperCAmelCase_ = outputs.logits print("Logits:" , logits[0, :3] ) print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] ) UpperCAmelCase_ = timm_model(snake_case_ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(snake_case_ , outputs.logits , atol=1E-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" ) model.save_pretrained(snake_case_ ) processor.save_pretrained(snake_case_ ) if push_to_hub: print(f"""Pushing model {model_name} and processor to the hub""" ) model.push_to_hub(f"""ybelkada/{model_name}""" ) processor.push_to_hub(f"""ybelkada/{model_name}""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_: int =argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='resnetv2_50x1_bitm', type=str, help='Name of the BiT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model to the hub.', ) SCREAMING_SNAKE_CASE_: Union[str, Any] =parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
78
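Once converted (or starting from a checkpoint already on the Hub, e.g. google/bit-50), the weights load through the standard classes; prepare_img is the COCO-image helper defined above under its original name:

import torch
from transformers import BitForImageClassification, BitImageProcessor

processor = BitImageProcessor.from_pretrained("google/bit-50")
model = BitForImageClassification.from_pretrained("google/bit-50")

inputs = processor(images=prepare_img(), return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])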
'''simple docstring''' import argparse import os.path as osp import re import torch from safetensors.torch import load_file, save_file # =================# # UNet Conversion # # =================# SCREAMING_SNAKE_CASE_: Dict =[ # (stable-diffusion, HF Diffusers) ('time_embed.0.weight', 'time_embedding.linear_1.weight'), ('time_embed.0.bias', 'time_embedding.linear_1.bias'), ('time_embed.2.weight', 'time_embedding.linear_2.weight'), ('time_embed.2.bias', 'time_embedding.linear_2.bias'), ('input_blocks.0.0.weight', 'conv_in.weight'), ('input_blocks.0.0.bias', 'conv_in.bias'), ('out.0.weight', 'conv_norm_out.weight'), ('out.0.bias', 'conv_norm_out.bias'), ('out.2.weight', 'conv_out.weight'), ('out.2.bias', 'conv_out.bias'), ] SCREAMING_SNAKE_CASE_: List[Any] =[ # (stable-diffusion, HF Diffusers) ('in_layers.0', 'norm1'), ('in_layers.2', 'conv1'), ('out_layers.0', 'norm2'), ('out_layers.3', 'conv2'), ('emb_layers.1', 'time_emb_proj'), ('skip_connection', 'conv_shortcut'), ] SCREAMING_SNAKE_CASE_: Union[str, Any] =[] # hardcoded number of downblocks and resnets/attentions... # would need smarter logic for other networks. for i in range(4): # loop over downblocks/upblocks for j in range(2): # loop over resnets/attentions for downblocks SCREAMING_SNAKE_CASE_: Any =f"down_blocks.{i}.resnets.{j}." SCREAMING_SNAKE_CASE_: Tuple =f"input_blocks.{3*i + j + 1}.0." unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix)) if i < 3: # no attention layers in down_blocks.3 SCREAMING_SNAKE_CASE_: Optional[Any] =f"down_blocks.{i}.attentions.{j}." SCREAMING_SNAKE_CASE_: List[str] =f"input_blocks.{3*i + j + 1}.1." unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix)) for j in range(3): # loop over resnets/attentions for upblocks SCREAMING_SNAKE_CASE_: Union[str, Any] =f"up_blocks.{i}.resnets.{j}." SCREAMING_SNAKE_CASE_: Any =f"output_blocks.{3*i + j}.0." unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix)) if i > 0: # no attention layers in up_blocks.0 SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.attentions.{j}." SCREAMING_SNAKE_CASE_: Optional[int] =f"output_blocks.{3*i + j}.1." unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix)) if i < 3: # no downsample in down_blocks.3 SCREAMING_SNAKE_CASE_: Union[str, Any] =f"down_blocks.{i}.downsamplers.0.conv." SCREAMING_SNAKE_CASE_: Union[str, Any] =f"input_blocks.{3*(i+1)}.0.op." unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix)) # no upsample in up_blocks.3 SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.upsamplers.0." SCREAMING_SNAKE_CASE_: List[Any] =f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}." unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix)) SCREAMING_SNAKE_CASE_: int ='mid_block.attentions.0.' SCREAMING_SNAKE_CASE_: List[Any] ='middle_block.1.' unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) for j in range(2): SCREAMING_SNAKE_CASE_: Tuple =f"mid_block.resnets.{j}." SCREAMING_SNAKE_CASE_: Tuple =f"middle_block.{2*j}." 
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix)) def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = {k: k for k in unet_state_dict.keys()} for sd_name, hf_name in unet_conversion_map: UpperCAmelCase_ = sd_name for k, v in mapping.items(): if "resnets" in k: for sd_part, hf_part in unet_conversion_map_resnet: UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ ) UpperCAmelCase_ = v for k, v in mapping.items(): for sd_part, hf_part in unet_conversion_map_layer: UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ ) UpperCAmelCase_ = v UpperCAmelCase_ = {v: unet_state_dict[k] for k, v in mapping.items()} return new_state_dict # ================# # VAE Conversion # # ================# SCREAMING_SNAKE_CASE_: int =[ # (stable-diffusion, HF Diffusers) ('nin_shortcut', 'conv_shortcut'), ('norm_out', 'conv_norm_out'), ('mid.attn_1.', 'mid_block.attentions.0.'), ] for i in range(4): # down_blocks have two resnets for j in range(2): SCREAMING_SNAKE_CASE_: Tuple =f"encoder.down_blocks.{i}.resnets.{j}." SCREAMING_SNAKE_CASE_: int =f"encoder.down.{i}.block.{j}." vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) if i < 3: SCREAMING_SNAKE_CASE_: int =f"down_blocks.{i}.downsamplers.0." SCREAMING_SNAKE_CASE_: str =f"down.{i}.downsample." vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix)) SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.upsamplers.0." SCREAMING_SNAKE_CASE_: List[str] =f"up.{3-i}.upsample." vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) # up_blocks have three resnets # also, up blocks in hf are numbered in reverse from sd for j in range(3): SCREAMING_SNAKE_CASE_: List[str] =f"decoder.up_blocks.{i}.resnets.{j}." SCREAMING_SNAKE_CASE_: Dict =f"decoder.up.{3-i}.block.{j}." vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) # this part accounts for mid blocks in both the encoder and the decoder for i in range(2): SCREAMING_SNAKE_CASE_: Any =f"mid_block.resnets.{i}." SCREAMING_SNAKE_CASE_: Tuple =f"mid.block_{i+1}." 
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) SCREAMING_SNAKE_CASE_: int =[ # (stable-diffusion, HF Diffusers) ('norm.', 'group_norm.'), ('q.', 'query.'), ('k.', 'key.'), ('v.', 'value.'), ('proj_out.', 'proj_attn.'), ] def lowerCAmelCase_ ( snake_case_ : Tuple ) -> Tuple: '''simple docstring''' return w.reshape(*w.shape , 1 , 1 ) def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ = {k: k for k in vae_state_dict.keys()} for k, v in mapping.items(): for sd_part, hf_part in vae_conversion_map: UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ ) UpperCAmelCase_ = v for k, v in mapping.items(): if "attentions" in k: for sd_part, hf_part in vae_conversion_map_attn: UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ ) UpperCAmelCase_ = v UpperCAmelCase_ = {v: vae_state_dict[k] for k, v in mapping.items()} UpperCAmelCase_ = ["q", "k", "v", "proj_out"] for k, v in new_state_dict.items(): for weight_name in weights_to_convert: if f"""mid.attn_1.{weight_name}.weight""" in k: print(f"""Reshaping {k} for SD format""" ) UpperCAmelCase_ = reshape_weight_for_sd(snake_case_ ) return new_state_dict # =========================# # Text Encoder Conversion # # =========================# SCREAMING_SNAKE_CASE_: List[Any] =[ # (stable-diffusion, HF Diffusers) ('resblocks.', 'text_model.encoder.layers.'), ('ln_1', 'layer_norm1'), ('ln_2', 'layer_norm2'), ('.c_fc.', '.fc1.'), ('.c_proj.', '.fc2.'), ('.attn', '.self_attn'), ('ln_final.', 'transformer.text_model.final_layer_norm.'), ('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'), ('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'), ] SCREAMING_SNAKE_CASE_: Dict ={re.escape(x[1]): x[0] for x in textenc_conversion_lst} SCREAMING_SNAKE_CASE_: str =re.compile('|'.join(protected.keys())) # Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp SCREAMING_SNAKE_CASE_: List[Any] ={'q': 0, 'k': 1, 'v': 2} def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Tuple: '''simple docstring''' UpperCAmelCase_ = {} UpperCAmelCase_ = {} UpperCAmelCase_ = {} for k, v in text_enc_dict.items(): if ( k.endswith(".self_attn.q_proj.weight" ) or k.endswith(".self_attn.k_proj.weight" ) or k.endswith(".self_attn.v_proj.weight" ) ): UpperCAmelCase_ = k[: -len(".q_proj.weight" )] UpperCAmelCase_ = k[-len("q_proj.weight" )] if k_pre not in capture_qkv_weight: UpperCAmelCase_ = [None, None, None] UpperCAmelCase_ = v continue if ( k.endswith(".self_attn.q_proj.bias" ) or k.endswith(".self_attn.k_proj.bias" ) or k.endswith(".self_attn.v_proj.bias" ) ): UpperCAmelCase_ = k[: -len(".q_proj.bias" )] UpperCAmelCase_ = k[-len("q_proj.bias" )] if k_pre not in capture_qkv_bias: UpperCAmelCase_ = [None, None, None] UpperCAmelCase_ = v continue UpperCAmelCase_ = textenc_pattern.sub(lambda snake_case_ : protected[re.escape(m.group(0 ) )] , snake_case_ ) UpperCAmelCase_ = v for k_pre, tensors in capture_qkv_weight.items(): if None in tensors: raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" ) UpperCAmelCase_ = textenc_pattern.sub(lambda snake_case_ : protected[re.escape(m.group(0 ) )] , snake_case_ ) UpperCAmelCase_ = torch.cat(snake_case_ ) for k_pre, tensors in capture_qkv_bias.items(): if None in tensors: raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" ) UpperCAmelCase_ = textenc_pattern.sub(lambda snake_case_ : 
protected[re.escape(m.group(0 ) )] , snake_case_ ) UpperCAmelCase_ = torch.cat(snake_case_ ) return new_state_dict def lowerCAmelCase_ ( snake_case_ : List[Any] ) -> Union[str, Any]: '''simple docstring''' return text_enc_dict if __name__ == "__main__": SCREAMING_SNAKE_CASE_: str =argparse.ArgumentParser() parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.') parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--half', action='store_true', help='Save weights in half precision.') parser.add_argument( '--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.' ) SCREAMING_SNAKE_CASE_: Dict =parser.parse_args() assert args.model_path is not None, "Must provide a model path!" assert args.checkpoint_path is not None, "Must provide a checkpoint path!" # Path for safetensors SCREAMING_SNAKE_CASE_: Any =osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors') SCREAMING_SNAKE_CASE_: Dict =osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors') SCREAMING_SNAKE_CASE_: Union[str, Any] =osp.join(args.model_path, 'text_encoder', 'model.safetensors') # Load models from safetensors if it exists, if it doesn't pytorch if osp.exists(unet_path): SCREAMING_SNAKE_CASE_: Union[str, Any] =load_file(unet_path, device='cpu') else: SCREAMING_SNAKE_CASE_: int =osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin') SCREAMING_SNAKE_CASE_: Dict =torch.load(unet_path, map_location='cpu') if osp.exists(vae_path): SCREAMING_SNAKE_CASE_: Tuple =load_file(vae_path, device='cpu') else: SCREAMING_SNAKE_CASE_: List[Any] =osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin') SCREAMING_SNAKE_CASE_: str =torch.load(vae_path, map_location='cpu') if osp.exists(text_enc_path): SCREAMING_SNAKE_CASE_: Tuple =load_file(text_enc_path, device='cpu') else: SCREAMING_SNAKE_CASE_: List[Any] =osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin') SCREAMING_SNAKE_CASE_: Any =torch.load(text_enc_path, map_location='cpu') # Convert the UNet model SCREAMING_SNAKE_CASE_: List[Any] =convert_unet_state_dict(unet_state_dict) SCREAMING_SNAKE_CASE_: Any ={'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()} # Convert the VAE model SCREAMING_SNAKE_CASE_: List[Any] =convert_vae_state_dict(vae_state_dict) SCREAMING_SNAKE_CASE_: Dict ={'first_stage_model.' + k: v for k, v in vae_state_dict.items()} # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper SCREAMING_SNAKE_CASE_: Dict ='text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict if is_vaa_model: # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm SCREAMING_SNAKE_CASE_: Any ={'transformer.' + k: v for k, v in text_enc_dict.items()} SCREAMING_SNAKE_CASE_: str =convert_text_enc_state_dict_vaa(text_enc_dict) SCREAMING_SNAKE_CASE_: int ={'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()} else: SCREAMING_SNAKE_CASE_: str =convert_text_enc_state_dict(text_enc_dict) SCREAMING_SNAKE_CASE_: Optional[int] ={'cond_stage_model.transformer.' 
+ k: v for k, v in text_enc_dict.items()} # Put together new checkpoint SCREAMING_SNAKE_CASE_: List[str] ={**unet_state_dict, **vae_state_dict, **text_enc_dict} if args.half: SCREAMING_SNAKE_CASE_: List[str] ={k: v.half() for k, v in state_dict.items()} if args.use_safetensors: save_file(state_dict, args.checkpoint_path) else: SCREAMING_SNAKE_CASE_: str ={'state_dict': state_dict} torch.save(state_dict, args.checkpoint_path)
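The conversion script above leans on two details that the obfuscated names make easy to miss: the regex substitution callback must receive the match object (lambda m: protected[re.escape(m.group(0))], not a discarded parameter), and the reshape helper turns a linear attention weight into a 1x1 convolution weight. A minimal, self-contained sketch of both:

import re

import torch

protected = {re.escape("layer_norm1"): "ln_1", re.escape("layer_norm2"): "ln_2"}
pattern = re.compile("|".join(protected.keys()))
key = "text_model.encoder.layers.0.layer_norm1.weight"
print(pattern.sub(lambda m: protected[re.escape(m.group(0))], key))
# -> text_model.encoder.layers.0.ln_1.weight

w = torch.randn(320, 320)               # a linear projection weight
print(w.reshape(*w.shape, 1, 1).shape)  # torch.Size([320, 320, 1, 1]), a 1x1 conv weight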
78
1
'''simple docstring''' import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE_: Optional[Any] =logging.get_logger(__name__) SCREAMING_SNAKE_CASE_: str ={ 'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json', # See all DETR models at https://huggingface.co/models?filter=detr } class __A ( UpperCamelCase__ ): a__ : Tuple = """detr""" a__ : Dict = ["""past_key_values"""] a__ : Dict = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__(self : Dict , __a : List[Any]=True , __a : List[Any]=None , __a : Optional[int]=3 , __a : Tuple=100 , __a : Optional[int]=6 , __a : Optional[Any]=2048 , __a : Tuple=8 , __a : Optional[Any]=6 , __a : Optional[int]=2048 , __a : List[Any]=8 , __a : Dict=0.0 , __a : Dict=0.0 , __a : str=True , __a : Optional[Any]="relu" , __a : Optional[Any]=256 , __a : Union[str, Any]=0.1 , __a : List[Any]=0.0 , __a : Any=0.0 , __a : str=0.02 , __a : Union[str, Any]=1.0 , __a : Any=False , __a : Optional[Any]="sine" , __a : int="resnet50" , __a : int=True , __a : List[str]=False , __a : str=1 , __a : Optional[Any]=5 , __a : List[Any]=2 , __a : List[str]=1 , __a : Tuple=1 , __a : Optional[Any]=5 , __a : Union[str, Any]=2 , __a : Any=0.1 , **__a : str , ): if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." 
) UpperCAmelCase_ = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(__a , __a ): UpperCAmelCase_ = backbone_config.get("model_type" ) UpperCAmelCase_ = CONFIG_MAPPING[backbone_model_type] UpperCAmelCase_ = config_class.from_dict(__a ) # set timm attributes to None UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = None, None, None UpperCAmelCase_ = use_timm_backbone UpperCAmelCase_ = backbone_config UpperCAmelCase_ = num_channels UpperCAmelCase_ = num_queries UpperCAmelCase_ = d_model UpperCAmelCase_ = encoder_ffn_dim UpperCAmelCase_ = encoder_layers UpperCAmelCase_ = encoder_attention_heads UpperCAmelCase_ = decoder_ffn_dim UpperCAmelCase_ = decoder_layers UpperCAmelCase_ = decoder_attention_heads UpperCAmelCase_ = dropout UpperCAmelCase_ = attention_dropout UpperCAmelCase_ = activation_dropout UpperCAmelCase_ = activation_function UpperCAmelCase_ = init_std UpperCAmelCase_ = init_xavier_std UpperCAmelCase_ = encoder_layerdrop UpperCAmelCase_ = decoder_layerdrop UpperCAmelCase_ = encoder_layers UpperCAmelCase_ = auxiliary_loss UpperCAmelCase_ = position_embedding_type UpperCAmelCase_ = backbone UpperCAmelCase_ = use_pretrained_backbone UpperCAmelCase_ = dilation # Hungarian matcher UpperCAmelCase_ = class_cost UpperCAmelCase_ = bbox_cost UpperCAmelCase_ = giou_cost # Loss coefficients UpperCAmelCase_ = mask_loss_coefficient UpperCAmelCase_ = dice_loss_coefficient UpperCAmelCase_ = bbox_loss_coefficient UpperCAmelCase_ = giou_loss_coefficient UpperCAmelCase_ = eos_coefficient super().__init__(is_encoder_decoder=__a , **__a ) @property def _lowercase (self : Optional[int] ): return self.encoder_attention_heads @property def _lowercase (self : Any ): return self.d_model @classmethod def _lowercase (cls : str , __a : PretrainedConfig , **__a : List[Any] ): return cls(backbone_config=__a , **__a ) def _lowercase (self : Dict ): UpperCAmelCase_ = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: UpperCAmelCase_ = self.backbone_config.to_dict() UpperCAmelCase_ = self.__class__.model_type return output class __A ( UpperCamelCase__ ): a__ : str = version.parse("""1.11""" ) @property def _lowercase (self : int ): return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def _lowercase (self : List[str] ): return 1E-5 @property def _lowercase (self : Union[str, Any] ): return 12
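Assuming the obfuscated class above corresponds to transformers' DetrConfig (its attribute_map maps hidden_size to d_model and num_attention_heads to encoder_attention_heads), the aliasing can be checked directly:

from transformers import DetrConfig

config = DetrConfig(num_queries=50, d_model=256)
print(config.hidden_size)          # 256, resolved through attribute_map
print(config.num_attention_heads)  # resolves to encoder_attention_heads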
78
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize


def norm_squared(vector: ndarray) -> float:
    '''simple docstring'''
    return np.dot(vector, vector)


class SVC:
    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ):
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vectora: ndarray, vectorb: ndarray) -> float:
        return np.dot(vectora, vectorb)

    def __rbf(self, vectora: ndarray, vectorb: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vectora - vectorb)))

    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)
        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]
        ).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
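A toy fit/predict round trip for the classifier above (method names as restored in this listing), with two linearly separable classes:

import numpy as np

xs = [np.asarray((0.0, 1.0)), np.asarray((0.0, 2.0)), np.asarray((1.0, 1.0)), np.asarray((1.0, 2.0))]
ys = np.asarray((1, 1, -1, -1))

svc = SVC(kernel="linear")
svc.fit(xs, ys)
print(svc.predict(np.asarray((0.0, 1.0))))  # 1
print(svc.predict(np.asarray((1.0, 1.0))))  # -1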
78
1
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def __lowercase ( snake_case, snake_case ): """simple docstring""" __magic_name__ :List[str] = args.log_outputs __magic_name__ :Tuple = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] ) # load metric __magic_name__ :Dict = load_metric('''wer''' ) __magic_name__ :List[Any] = load_metric('''cer''' ) # compute metrics __magic_name__ :Optional[Any] = wer.compute(references=result['''target'''], predictions=result['''prediction'''] ) __magic_name__ :Dict = cer.compute(references=result['''target'''], predictions=result['''prediction'''] ) # print & log results __magic_name__ :Tuple = f'''WER: {wer_result}\nCER: {cer_result}''' print(snake_case ) with open(f'''{dataset_id}_eval_results.txt''', '''w''' ) as f: f.write(snake_case ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: __magic_name__ :Dict = f'''log_{dataset_id}_predictions.txt''' __magic_name__ :Union[str, Any] = f'''log_{dataset_id}_targets.txt''' with open(snake_case, '''w''' ) as p, open(snake_case, '''w''' ) as t: # mapping function to write output def write_to_file(snake_case, snake_case ): p.write(f'''{i}''' + '''\n''' ) p.write(batch['''prediction'''] + '''\n''' ) t.write(f'''{i}''' + '''\n''' ) t.write(batch['''target'''] + '''\n''' ) result.map(snake_case, with_indices=snake_case ) def __lowercase ( snake_case ): """simple docstring""" __magic_name__ :Optional[Any] = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training __magic_name__ :Optional[Any] = re.sub(snake_case, '''''', text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! 
__magic_name__ :Tuple = ['''\n\n''', '''\n''', ''' ''', ''' '''] for t in token_sequences_to_ignore: __magic_name__ :Any = ''' '''.join(text.split(snake_case ) ) return text def __lowercase ( snake_case ): """simple docstring""" __magic_name__ :List[str] = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=snake_case ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor __magic_name__ :str = AutoFeatureExtractor.from_pretrained(args.model_id ) __magic_name__ :Optional[int] = feature_extractor.sampling_rate # resample audio __magic_name__ :Dict = dataset.cast_column('''audio''', Audio(sampling_rate=snake_case ) ) # load eval pipeline if args.device is None: __magic_name__ :List[str] = 0 if torch.cuda.is_available() else -1 __magic_name__ :Any = pipeline('''automatic-speech-recognition''', model=args.model_id, device=args.device ) # map function to decode audio def map_to_pred(snake_case ): __magic_name__ :List[str] = asr( batch['''audio''']['''array'''], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s ) __magic_name__ :Any = prediction['''text'''] __magic_name__ :Optional[int] = normalize_text(batch['''sentence'''] ) return batch # run inference on all examples __magic_name__ :Any = dataset.map(snake_case, remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(snake_case, snake_case ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Any = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) SCREAMING_SNAKE_CASE__ : int = parser.parse_args() main(args)
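What the normalization above does to a target sentence, in isolation: strip the ignored characters, lowercase, and collapse whitespace runs:

import re

chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605, the same set the script strips
text = re.sub(chars_to_ignore_regex, "", "Hello, World!  How are you?".lower())
text = " ".join(text.split())  # collapses the token sequences the loop above targets
print(text)  # -> hello world how are you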
0
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import FeatureExtractionMixin from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType, logging SCREAMING_SNAKE_CASE_: Optional[Any] =logging.get_logger(__name__) SCREAMING_SNAKE_CASE_: List[Any] ={ 'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json', # See all Perceiver models at https://huggingface.co/models?filter=perceiver } class __A ( UpperCamelCase__ ): a__ : List[Any] = """perceiver""" def __init__(self : Optional[int] , __a : Tuple=256 , __a : Optional[Any]=1280 , __a : Optional[int]=768 , __a : Any=1 , __a : List[str]=26 , __a : Dict=8 , __a : List[Any]=8 , __a : Tuple=None , __a : List[str]=None , __a : Optional[int]="kv" , __a : Union[str, Any]=1 , __a : List[str]=1 , __a : List[Any]="gelu" , __a : List[str]=0.1 , __a : str=0.02 , __a : List[str]=1E-12 , __a : Optional[int]=True , __a : Tuple=262 , __a : Dict=2048 , __a : int=56 , __a : Optional[int]=[368, 496] , __a : Any=16 , __a : Optional[Any]=1920 , __a : Any=16 , __a : str=[1, 16, 224, 224] , **__a : Any , ): super().__init__(**__a ) UpperCAmelCase_ = num_latents UpperCAmelCase_ = d_latents UpperCAmelCase_ = d_model UpperCAmelCase_ = num_blocks UpperCAmelCase_ = num_self_attends_per_block UpperCAmelCase_ = num_self_attention_heads UpperCAmelCase_ = num_cross_attention_heads UpperCAmelCase_ = qk_channels UpperCAmelCase_ = v_channels UpperCAmelCase_ = cross_attention_shape_for_attention UpperCAmelCase_ = self_attention_widening_factor UpperCAmelCase_ = cross_attention_widening_factor UpperCAmelCase_ = hidden_act UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = initializer_range UpperCAmelCase_ = layer_norm_eps UpperCAmelCase_ = use_query_residual # masked language modeling attributes UpperCAmelCase_ = vocab_size UpperCAmelCase_ = max_position_embeddings # image classification attributes UpperCAmelCase_ = image_size # flow attributes UpperCAmelCase_ = train_size # multimodal autoencoding attributes UpperCAmelCase_ = num_frames UpperCAmelCase_ = audio_samples_per_frame UpperCAmelCase_ = samples_per_patch UpperCAmelCase_ = output_shape class __A ( UpperCamelCase__ ): @property def _lowercase (self : Dict ): if self.task == "multiple-choice": UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"} else: UpperCAmelCase_ = {0: "batch", 1: "sequence"} return OrderedDict( [ ("inputs", dynamic_axis), ("attention_mask", dynamic_axis), ] ) @property def _lowercase (self : Optional[Any] ): return 1E-4 def _lowercase (self : Union[str, Any] , __a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __a : int = -1 , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional[TensorType] = None , __a : int = 3 , __a : int = 40 , __a : int = 40 , ): # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified if isinstance(__a , __a ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX UpperCAmelCase_ = compute_effective_axis_dimension( __a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX UpperCAmelCase_ = 
preprocessor.num_special_tokens_to_add(__a ) UpperCAmelCase_ = compute_effective_axis_dimension( __a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__a ) # Generate dummy inputs according to compute batch and sequence UpperCAmelCase_ = [" ".join(["a"] ) * seq_length] * batch_size UpperCAmelCase_ = dict(preprocessor(__a , return_tensors=__a ) ) UpperCAmelCase_ = inputs.pop("input_ids" ) return inputs elif isinstance(__a , __a ) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX UpperCAmelCase_ = compute_effective_axis_dimension(__a , fixed_dimension=OnnxConfig.default_fixed_batch ) UpperCAmelCase_ = self._generate_dummy_images(__a , __a , __a , __a ) UpperCAmelCase_ = dict(preprocessor(images=__a , return_tensors=__a ) ) UpperCAmelCase_ = inputs.pop("pixel_values" ) return inputs else: raise ValueError( "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
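A detail worth noting in the dummy-input generation above: " ".join(["a"]) is simply "a", so the expression builds batch_size copies of an all-'a' string of length seq_length:

batch_size, seq_length = 2, 8
dummy = [" ".join(["a"]) * seq_length] * batch_size
print(dummy)  # ['aaaaaaaa', 'aaaaaaaa']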
78
0
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def _A ( ) -> int: """simple docstring""" __UpperCamelCase = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png' __UpperCamelCase = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ).convert('RGB' ) return image def _A ( _lowercase ) -> int: """simple docstring""" __UpperCamelCase = [] # fmt: off # vision encoder rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') ) rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') ) rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') ) rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') ) rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') ) rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') ) # QFormer rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') ) rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') ) # fmt: on return rename_keys def _A ( _lowercase , _lowercase , _lowercase ) -> Optional[int]: """simple docstring""" __UpperCamelCase = dct.pop(_lowercase ) __UpperCamelCase = val def _A ( _lowercase , _lowercase ) -> int: """simple docstring""" for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases __UpperCamelCase = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' ) __UpperCamelCase = 
state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' ) # next, set bias in the state dict __UpperCamelCase = torch.cat((q_bias, torch.zeros_like(_lowercase , requires_grad=_lowercase ), v_bias) ) __UpperCamelCase = qkv_bias def _A ( _lowercase , _lowercase ) -> Any: """simple docstring""" __UpperCamelCase = 3_64 if 'coco' in model_name else 2_24 __UpperCamelCase = BlipaVisionConfig(image_size=_lowercase ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: __UpperCamelCase = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=_lowercase ).to_dict() elif "opt-6.7b" in model_name: __UpperCamelCase = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=_lowercase ).to_dict() elif "t5-xl" in model_name: __UpperCamelCase = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: __UpperCamelCase = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() __UpperCamelCase = BlipaConfig(vision_config=_lowercase , text_config=_lowercase ) return config, image_size @torch.no_grad() def _A ( _lowercase , _lowercase=None , _lowercase=False ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = ( AutoTokenizer.from_pretrained('facebook/opt-2.7b' ) if 'opt' in model_name else AutoTokenizer.from_pretrained('google/flan-t5-xl' ) ) __UpperCamelCase = tokenizer('\n' , add_special_tokens=_lowercase ).input_ids[0] __UpperCamelCase, __UpperCamelCase = get_blipa_config(_lowercase , eos_token_id=_lowercase ) __UpperCamelCase = BlipaForConditionalGeneration(_lowercase ).eval() __UpperCamelCase = { 'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'), 'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'), 'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'), 'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'), 'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'), 'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'), 'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'), } __UpperCamelCase, __UpperCamelCase = model_name_to_original[model_name] # load original model print('Loading original model...' ) __UpperCamelCase = 'cuda' if torch.cuda.is_available() else 'cpu' __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = load_model_and_preprocess( name=_lowercase , model_type=_lowercase , is_eval=_lowercase , device=_lowercase ) original_model.eval() print('Done!' 
) # update state dict keys __UpperCamelCase = original_model.state_dict() __UpperCamelCase = create_rename_keys(_lowercase ) for src, dest in rename_keys: rename_key(_lowercase , _lowercase , _lowercase ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): __UpperCamelCase = state_dict.pop(_lowercase ) if key.startswith('Qformer.bert' ): __UpperCamelCase = key.replace('Qformer.bert' , 'qformer' ) if "attention.self" in key: __UpperCamelCase = key.replace('self' , 'attention' ) if "opt_proj" in key: __UpperCamelCase = key.replace('opt_proj' , 'language_projection' ) if "t5_proj" in key: __UpperCamelCase = key.replace('t5_proj' , 'language_projection' ) if key.startswith('opt' ): __UpperCamelCase = key.replace('opt' , 'language' ) if key.startswith('t5' ): __UpperCamelCase = key.replace('t5' , 'language' ) __UpperCamelCase = val # read in qv biases read_in_q_v_bias(_lowercase , _lowercase ) __UpperCamelCase, __UpperCamelCase = hf_model.load_state_dict(_lowercase , strict=_lowercase ) assert len(_lowercase ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] __UpperCamelCase = load_demo_image() __UpperCamelCase = vis_processors['eval'](_lowercase ).unsqueeze(0 ).to(_lowercase ) __UpperCamelCase = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(_lowercase ) # create processor __UpperCamelCase = BlipImageProcessor( size={'height': image_size, 'width': image_size} , image_mean=_lowercase , image_std=_lowercase ) __UpperCamelCase = BlipaProcessor(image_processor=_lowercase , tokenizer=_lowercase ) __UpperCamelCase = processor(images=_lowercase , return_tensors='pt' ).pixel_values.to(_lowercase ) # make sure processor creates exact same pixel values assert torch.allclose(_lowercase , _lowercase ) original_model.to(_lowercase ) hf_model.to(_lowercase ) with torch.no_grad(): if "opt" in model_name: __UpperCamelCase = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits __UpperCamelCase = hf_model(_lowercase , _lowercase ).logits else: __UpperCamelCase = original_model( {'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits __UpperCamelCase = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 ) __UpperCamelCase = hf_model(_lowercase , _lowercase , labels=_lowercase ).logits assert original_logits.shape == logits.shape print('First values of original logits:' , original_logits[0, :3, :3] ) print('First values of HF logits:' , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": __UpperCamelCase = torch.tensor( [[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=_lowercase ) assert torch.allclose(logits[0, :3, :3] , _lowercase , atol=1e-4 ) elif model_name == "blip2-flan-t5-xl-coco": __UpperCamelCase = torch.tensor( [[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=_lowercase ) else: # cast to same type __UpperCamelCase = logits.dtype assert torch.allclose(original_logits.to(_lowercase ) , _lowercase , atol=1e-2 ) print('Looks ok!' ) print('Generating a caption...' 
) __UpperCamelCase = '' __UpperCamelCase = tokenizer(_lowercase , return_tensors='pt' ).input_ids.to(_lowercase ) __UpperCamelCase = original_model.generate({'image': original_pixel_values} ) __UpperCamelCase = hf_model.generate( _lowercase , _lowercase , do_sample=_lowercase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print('Original generation:' , _lowercase ) __UpperCamelCase = input_ids.shape[1] __UpperCamelCase = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowercase ) __UpperCamelCase = [text.strip() for text in output_text] print('HF generation:' , _lowercase ) if pytorch_dump_folder_path is not None: processor.save_pretrained(_lowercase ) hf_model.save_pretrained(_lowercase ) if push_to_hub: processor.push_to_hub(f'''nielsr/{model_name}''' ) hf_model.push_to_hub(f'''nielsr/{model_name}''' ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() __snake_case = [ '''blip2-opt-2.7b''', '''blip2-opt-6.7b''', '''blip2-opt-2.7b-coco''', '''blip2-opt-6.7b-coco''', '''blip2-flan-t5-xl''', '''blip2-flan-t5-xl-coco''', '''blip2-flan-t5-xxl''', ] parser.add_argument( '''--model_name''', default='''blip2-opt-2.7b''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub after converting''', ) __snake_case = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
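The q/v-bias helper above packs the attention biases the way the HF checkpoint expects: the original model stores only q and v biases, and the k bias is implicitly zero. In isolation:

import torch

q_bias = torch.randn(4)
v_bias = torch.randn(4)
qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias, requires_grad=False), v_bias))
print(qkv_bias.shape)  # torch.Size([12]): [q | zeros for k | v]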
1
'''simple docstring'''
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    '''simple docstring'''
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
78
0
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]

if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
2
'''simple docstring''' from typing import Callable, List, Optional, Union import PIL import torch from transformers import ( CLIPImageProcessor, CLIPSegForImageSegmentation, CLIPSegProcessor, CLIPTextModel, CLIPTokenizer, ) from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import deprecate, is_accelerate_available, logging SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__) # pylint: disable=invalid-name class __A ( UpperCamelCase__ ): def __init__(self : Any , __a : CLIPSegForImageSegmentation , __a : CLIPSegProcessor , __a : AutoencoderKL , __a : CLIPTextModel , __a : CLIPTokenizer , __a : UNetaDConditionModel , __a : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __a : StableDiffusionSafetyChecker , __a : CLIPImageProcessor , ): super().__init__() if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1: UpperCAmelCase_ = ( f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`""" f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """ "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1" , "1.0.0" , __a , standard_warn=__a ) UpperCAmelCase_ = dict(scheduler.config ) UpperCAmelCase_ = 1 UpperCAmelCase_ = FrozenDict(__a ) if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False: UpperCAmelCase_ = ( f"""The configuration file of this scheduler: {scheduler} has not set the configuration""" " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make" " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to" " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face" " Hub, it would be very nice if you could open a Pull request for the" " `scheduler/scheduler_config.json` file" ) deprecate("skip_prk_steps not set" , "1.0.0" , __a , standard_warn=__a ) UpperCAmelCase_ = dict(scheduler.config ) UpperCAmelCase_ = True UpperCAmelCase_ = FrozenDict(__a ) if safety_checker is None: logger.warning( f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure""" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
) self.register_modules( segmentation_model=__a , segmentation_processor=__a , vae=__a , text_encoder=__a , tokenizer=__a , unet=__a , scheduler=__a , safety_checker=__a , feature_extractor=__a , ) def _lowercase (self : str , __a : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCAmelCase_ = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__a ) def _lowercase (self : int ): self.enable_attention_slicing(__a ) def _lowercase (self : Optional[Any] ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) UpperCAmelCase_ = torch.device("cuda" ) for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]: if cpu_offloaded_model is not None: cpu_offload(__a , __a ) @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def _lowercase (self : Optional[int] ): if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(__a , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() def __call__(self : Dict , __a : Union[str, List[str]] , __a : Union[torch.FloatTensor, PIL.Image.Image] , __a : str , __a : int = 512 , __a : int = 512 , __a : int = 50 , __a : float = 7.5 , __a : Optional[Union[str, List[str]]] = None , __a : Optional[int] = 1 , __a : float = 0.0 , __a : Optional[torch.Generator] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[str] = "pil" , __a : bool = True , __a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __a : int = 1 , **__a : int , ): UpperCAmelCase_ = self.segmentation_processor( text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device ) UpperCAmelCase_ = self.segmentation_model(**__a ) UpperCAmelCase_ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy() UpperCAmelCase_ = self.numpy_to_pil(__a )[0].resize(image.size ) # Run inpainting pipeline with the generated mask UpperCAmelCase_ = StableDiffusionInpaintPipeline( vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , ) return inpainting_pipeline( prompt=__a , image=__a , mask_image=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , )
78
0
'''simple docstring'''
from __future__ import annotations

from collections import deque


class Automaton:
    def __init__(self, keywords: list[str]) -> None:
        self.adlist: list[dict] = []
        self.adlist.append(
            {'value': '', 'next_states': [], 'fail_state': 0, 'output': []}
        )
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        'value': character,
                        'next_states': [],
                        'fail_state': 0,
                        'output': [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]['fail_state']
                while (
                    self.find_next_state(state, self.adlist[child]['value']) is None
                    and state != 0
                ):
                    state = self.adlist[state]['fail_state']
                self.adlist[child]['fail_state'] = self.find_next_state(
                    state, self.adlist[child]['value']
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]['output']
                    + self.adlist[self.adlist[child]['fail_state']]['output']
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]['fail_state']
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
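Usage of the automaton above (names as restored in this listing); the result maps each keyword to the start indices of its occurrences:

auto = Automaton(["what", "hat", "ver", "er"])
print(auto.search_in("whatever, err ... , wherever"))
# {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}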
3
'''simple docstring'''


def is_power_of_two(number: int) -> bool:
    '''simple docstring'''
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
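The bit trick behind the check: a positive power of two has exactly one set bit, so clearing the lowest set bit with number & (number - 1) leaves zero:

for n in (1, 2, 3, 4, 12, 16):
    print(n, bin(n), is_power_of_two(n))
# 1 0b1 True; 2 0b10 True; 3 0b11 False; 4 0b100 True; 12 0b1100 False; 16 0b10000 True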
78
0
"""simple docstring""" import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ): lowerCAmelCase = filter(lambda _UpperCAmelCase : p.requires_grad , model.parameters() ) lowerCAmelCase = sum([np.prod(p.size() ) for p in model_parameters] ) return params __UpperCamelCase : Union[str, Any] = logging.getLogger(__name__) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[int] , _UpperCAmelCase : str ): if metric == "rouge2": lowerCAmelCase = '{val_avg_rouge2:.4f}-{step_count}' elif metric == "bleu": lowerCAmelCase = '{val_avg_bleu:.4f}-{step_count}' elif metric == "em": lowerCAmelCase = '{val_avg_em:.4f}-{step_count}' elif metric == "loss": lowerCAmelCase = '{val_avg_loss:.4f}-{step_count}' else: raise NotImplementedError( F'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this' ' function.' ) lowerCAmelCase = ModelCheckpoint( dirpath=_UpperCAmelCase , filename=_UpperCAmelCase , monitor=F'val_{metric}' , mode='max' , save_top_k=1 , every_n_epochs=1 , ) return checkpoint_callback def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict ): return EarlyStopping( monitor=F'val_{metric}' , mode='min' if 'loss' in metric else 'max' , patience=_UpperCAmelCase , verbose=_UpperCAmelCase , ) class a ( pl.Callback ): def UpperCamelCase__ ( self , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = {F'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(_snake_case ) @rank_zero_only def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case=True ): """simple docstring""" logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' ) lowerCAmelCase = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} ) # Log results lowerCAmelCase = Path(pl_module.hparams.output_dir ) if type_path == "test": lowerCAmelCase = od / 'test_results.txt' lowerCAmelCase = od / 'test_generations.txt' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
lowerCAmelCase = od / F'{type_path}_results/{trainer.global_step:05d}.txt' lowerCAmelCase = od / F'{type_path}_generations/{trainer.global_step:05d}.txt' results_file.parent.mkdir(exist_ok=_snake_case ) generations_file.parent.mkdir(exist_ok=_snake_case ) with open(_snake_case , 'a+' ) as writer: for key in sorted(_snake_case ): if key in ["log", "progress_bar", "preds"]: continue lowerCAmelCase = metrics[key] if isinstance(_snake_case , torch.Tensor ): lowerCAmelCase = val.item() lowerCAmelCase = F'{key}: {val:.6f}\n' writer.write(_snake_case ) if not save_generations: return if "preds" in metrics: lowerCAmelCase = '\n'.join(metrics['preds'] ) generations_file.open('w+' ).write(_snake_case ) @rank_zero_only def UpperCamelCase__ ( self , _snake_case , _snake_case ): """simple docstring""" try: lowerCAmelCase = pl_module.model.model.num_parameters() except AttributeError: lowerCAmelCase = pl_module.model.num_parameters() lowerCAmelCase = count_trainable_parameters(_snake_case ) # mp stands for million parameters trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} ) @rank_zero_only def UpperCamelCase__ ( self , _snake_case , _snake_case ): """simple docstring""" save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(_snake_case , _snake_case , 'test' ) @rank_zero_only def UpperCamelCase__ ( self , _snake_case , _snake_case ): """simple docstring""" save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
4
'''simple docstring''' from __future__ import annotations from collections import namedtuple from dataclasses import dataclass @dataclass class __A : a__ : int a__ : TreeNode | None = None a__ : TreeNode | None = None SCREAMING_SNAKE_CASE_: Union[str, Any] =namedtuple('CoinsDistribResult', 'moves excess') def lowerCAmelCase_ ( snake_case_ : TreeNode | None ) -> int: '''simple docstring''' if root is None: return 0 # Validation def count_nodes(snake_case_ : TreeNode | None ) -> int: if node is None: return 0 return count_nodes(node.left ) + count_nodes(node.right ) + 1 def count_coins(snake_case_ : TreeNode | None ) -> int: if node is None: return 0 return count_coins(node.left ) + count_coins(node.right ) + node.data if count_nodes(snake_case_ ) != count_coins(snake_case_ ): raise ValueError("The nodes number should be same as the number of coins" ) # Main calculation def get_distrib(snake_case_ : TreeNode | None ) -> CoinsDistribResult: if node is None: return CoinsDistribResult(0 , 1 ) UpperCAmelCase_ , UpperCAmelCase_ = get_distrib(node.left ) UpperCAmelCase_ , UpperCAmelCase_ = get_distrib(node.right ) UpperCAmelCase_ = 1 - left_distrib_excess UpperCAmelCase_ = 1 - right_distrib_excess UpperCAmelCase_ = ( left_distrib_moves + right_distrib_moves + abs(snake_case_ ) + abs(snake_case_ ) ) UpperCAmelCase_ = node.data - coins_to_left - coins_to_right return CoinsDistribResult(snake_case_ , snake_case_ ) return get_distrib(snake_case_ )[0] if __name__ == "__main__": import doctest doctest.testmod()
78
0
'''simple docstring''' import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' _lowercase : Optional[int] = '''ssube/stable-diffusion-x4-upscaler-onnx''' def _lowercase ( self , _lowercase=0 ): """simple docstring""" _lowerCAmelCase = floats_tensor((1, 3, 128, 128) , rng=random.Random(_lowercase ) ) _lowerCAmelCase = torch.manual_seed(_lowercase ) _lowerCAmelCase = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def _lowercase ( self ): """simple docstring""" _lowerCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=_lowercase ) _lowerCAmelCase = self.get_dummy_inputs() _lowerCAmelCase = pipe(**_lowercase ).images _lowerCAmelCase = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 512, 512, 3) _lowerCAmelCase = np.array( [0.697_4782, 0.6890_2093, 0.7013_5885, 0.758_3618, 0.780_4545, 0.785_4912, 0.7866_7426, 0.7874_3863, 0.7807_0223] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def _lowercase ( self ): """simple docstring""" _lowerCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) _lowerCAmelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) _lowerCAmelCase = self.get_dummy_inputs() _lowerCAmelCase = pipe(**_lowercase ).images _lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _lowerCAmelCase = np.array( [0.689_8892, 0.5924_0556, 0.5249_9527, 0.5886_6215, 0.5225_8235, 0.5257_2715, 0.6241_4473, 0.617_4387, 0.621_4964] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _lowercase ( self ): """simple docstring""" _lowerCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) _lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowercase ) _lowerCAmelCase = self.get_dummy_inputs() _lowerCAmelCase = pipe(**_lowercase ).images _lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _lowerCAmelCase = np.array( [0.765_9278, 0.7643_7664, 0.7557_9107, 0.769_1116, 0.7766_6986, 0.772_7672, 0.775_8664, 0.781_2226, 0.7694_2515] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _lowercase ( self ): """simple docstring""" _lowerCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) _lowerCAmelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowercase ) _lowerCAmelCase = self.get_dummy_inputs() _lowerCAmelCase = 
pipe(**_lowercase ).images _lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _lowerCAmelCase = np.array( [0.697_4782, 0.6890_2093, 0.7013_5885, 0.758_3618, 0.780_4545, 0.785_4912, 0.7866_7426, 0.7874_3863, 0.7807_0223] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _lowercase ( self ): """simple docstring""" _lowerCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) _lowerCAmelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowercase ) _lowerCAmelCase = self.get_dummy_inputs() _lowerCAmelCase = pipe(**_lowercase ).images _lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _lowerCAmelCase = np.array( [0.7742_4496, 0.77_3601, 0.764_5288, 0.776_9598, 0.777_2739, 0.773_8688, 0.7818_7233, 0.7787_9584, 0.76_7043] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @property def _lowercase ( self ): """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _lowercase ( self ): """simple docstring""" _lowerCAmelCase = ort.SessionOptions() _lowerCAmelCase = False return options def _lowercase ( self ): """simple docstring""" _lowerCAmelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) _lowerCAmelCase = init_image.resize((128, 128) ) # using the PNDM scheduler by default _lowerCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained( """ssube/stable-diffusion-x4-upscaler-onnx""" , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_lowercase ) _lowerCAmelCase = """A fantasy landscape, trending on artstation""" _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = pipe( prompt=_lowercase , image=_lowercase , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowercase , output_type="""np""" , ) _lowerCAmelCase = output.images _lowerCAmelCase = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) _lowerCAmelCase = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def _lowercase ( self ): """simple docstring""" _lowerCAmelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) _lowerCAmelCase = init_image.resize((128, 128) ) _lowerCAmelCase = LMSDiscreteScheduler.from_pretrained( """ssube/stable-diffusion-x4-upscaler-onnx""" , subfolder="""scheduler""" ) _lowerCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained( """ssube/stable-diffusion-x4-upscaler-onnx""" , scheduler=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_lowercase ) _lowerCAmelCase = """A fantasy landscape, trending on artstation""" _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = pipe( prompt=_lowercase , image=_lowercase , guidance_scale=7.5 , num_inference_steps=20 , generator=_lowercase , output_type="""np""" , ) _lowerCAmelCase = 
output.images _lowerCAmelCase = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) _lowerCAmelCase = np.array( [0.5017_3753, 0.5022_3356, 0.50_2039, 0.5023_3036, 0.502_3725, 0.502_2601, 0.501_8758, 0.5023_4085, 0.5024_1566] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
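A minimal sketch of what these tests exercise, reusing the checkpoint, provider, and call signature taken from the tests themselves:

import torch
from diffusers import OnnxStableDiffusionUpscalePipeline
from diffusers.utils.testing_utils import load_image

pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
    "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider"
)
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/img2img/sketch-mountains-input.jpg"
).resize((128, 128))
generator = torch.manual_seed(0)
output = pipe(
    prompt="A fantasy landscape, trending on artstation",
    image=init_image,
    guidance_scale=7.5,
    num_inference_steps=10,
    generator=generator,
    output_type="np",
)
print(output.images.shape)  # (1, 512, 512, 3): the 128x128 input upscaled 4x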
5
'''simple docstring''' import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) SCREAMING_SNAKE_CASE_: int =logging.getLogger() def lowerCAmelCase_ ( ) -> Dict: '''simple docstring''' UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument("-f" ) UpperCAmelCase_ = parser.parse_args() return args.f def lowerCAmelCase_ ( snake_case_ : List[Any] ) -> str: '''simple docstring''' UpperCAmelCase_ = {} UpperCAmelCase_ = os.path.join(snake_case_ , "all_results.json" ) if os.path.exists(snake_case_ ): with open(snake_case_ , "r" ) as f: UpperCAmelCase_ = json.load(snake_case_ ) else: raise ValueError(f"""can't find {path}""" ) return results def lowerCAmelCase_ ( ) -> Dict: '''simple docstring''' UpperCAmelCase_ = torch.cuda.is_available() and torch_device == "cuda" return is_using_cuda and is_apex_available() SCREAMING_SNAKE_CASE_: Any =logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class __A ( UpperCamelCase__ ): @classmethod def _lowercase (cls : Any ): # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU UpperCAmelCase_ = tempfile.mkdtemp() UpperCAmelCase_ = os.path.join(cls.tmpdir , "default_config.yml" ) write_basic_config(save_location=cls.configPath ) UpperCAmelCase_ = ["accelerate", "launch", "--config_file", cls.configPath] @classmethod def _lowercase (cls : int ): shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --checkpointing_steps epoch --with_tracking """.split() if is_cuda_and_apex_available(): testargs.append("--fp16" ) run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "glue_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking """.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertLess(result["perplexity"] , 100 ) self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "clm_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertLess(result["perplexity"] , 42 ) self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "mlm_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : Optional[Any] ): # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu UpperCAmelCase_ = 7 if get_gpu_count() > 1 else 2 UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) self.assertLess(result["train_loss"] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "ner_no_trainer" ) ) ) @unittest.skip(reason="Fix me @muellerzr" ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : int ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. 
self.assertGreaterEqual(result["eval_f1"] , 28 ) self.assertGreaterEqual(result["eval_exact"] , 28 ) self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "qa_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : str ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertGreaterEqual(result["eval_accuracy"] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(__a , "swag_no_trainer" ) ) ) @slow @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : Optional[int] ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertGreaterEqual(result["eval_rouge1"] , 10 ) self.assertGreaterEqual(result["eval_rouge2"] , 2 ) self.assertGreaterEqual(result["eval_rougeL"] , 7 ) self.assertGreaterEqual(result["eval_rougeLsum"] , 7 ) self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "summarization_no_trainer" ) ) ) @slow @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : List[str] ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertGreaterEqual(result["eval_bleu"] , 30 ) self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "translation_no_trainer" ) ) ) @slow def _lowercase (self : Dict ): UpperCAmelCase_ = logging.StreamHandler(sys.stdout ) logger.addHandler(__a ) UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 
--checkpointing_steps epoch """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : Any ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 """.split() if is_cuda_and_apex_available(): testargs.append("--fp16" ) run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) # The base model scores a 25% self.assertGreaterEqual(result["eval_accuracy"] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(__a , "step_1" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "image_classification_no_trainer" ) ) )
78
0
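# The test row above drives the no_trainer example scripts through
# `accelerate launch` and then reads the metrics they dump. After the
# identifier scrambling, its `get_results` helper checks and opens the
# output directory instead of the joined path and formats an undefined
# `path` variable. A de-obfuscated sketch of what the helper is meant to
# do (names restored for readability; this is not the row's literal code):
import json
import os


def get_results(output_dir: str) -> dict:
    # The example scripts write their final metrics to all_results.json.
    path = os.path.join(output_dir, "all_results.json")
    if not os.path.exists(path):
        raise ValueError(f"can't find {path}")
    with open(path, "r") as f:
        return json.load(f)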
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Dict , UpperCamelCase__: int=False ): try: SCREAMING_SNAKE_CASE__ = os.environ[key] except KeyError: # KEY isn't set, default to `default`. SCREAMING_SNAKE_CASE__ = default else: # KEY is set, convert it to True or False. try: SCREAMING_SNAKE_CASE__ = strtobool(UpperCamelCase__ ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f'''If set, {key} must be yes or no.''' ) return _value _lowerCamelCase = parse_flag_from_env('RUN_SLOW', default=False) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] ): return unittest.skip("""Test was skipped""" )(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Any ): return unittest.skipUnless(_run_slow_tests , """test is slow""" )(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] ): return unittest.skipUnless(not torch.cuda.is_available() , """test requires only a CPU""" )(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int ): return unittest.skipUnless(torch.cuda.is_available() , """test requires a GPU""" )(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Any ): return unittest.skipUnless(is_xpu_available() , """test requires a XPU""" )(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Dict ): return unittest.skipUnless(is_mps_available() , """test requires a `mps` backend support in `torch`""" )(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[int] ): return unittest.skipUnless( is_transformers_available() and is_datasets_available() , """test requires the Hugging Face suite""" )(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ): return unittest.skipUnless(is_bnb_available() , """test requires the bitsandbytes library""" )(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Dict ): return unittest.skipUnless(is_tpu_available() , """test requires TPU""" )(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple ): return unittest.skipUnless(torch.cuda.device_count() == 1 , """test requires a GPU""" )(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int ): return unittest.skipUnless(torch.xpu.device_count() == 1 , """test requires a XPU""" )(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Any ): return unittest.skipUnless(torch.cuda.device_count() > 1 , """test requires multiple GPUs""" )(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Dict ): return unittest.skipUnless(torch.xpu.device_count() > 1 , """test requires multiple XPUs""" )(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[int] ): return unittest.skipUnless(is_safetensors_available() , """test requires safetensors""" )(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ): return unittest.skipUnless(is_deepspeed_available() , 
"""test requires DeepSpeed""" )(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Any ): return unittest.skipUnless(is_torch_version(""">=""" , """1.12.0""" ) , """test requires torch version >= 1.12.0""" )(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any]=None , UpperCamelCase__: str=None ): if test_case is None: return partial(UpperCamelCase__ , version=UpperCamelCase__ ) return unittest.skipUnless(is_torch_version(""">=""" , UpperCamelCase__ ) , f'''test requires torch version >= {version}''' )(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ): return unittest.skipUnless(is_tensorboard_available() , """test requires Tensorboard""" )(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Dict ): return unittest.skipUnless(is_wandb_available() , """test requires wandb""" )(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Dict ): return unittest.skipUnless(is_comet_ml_available() , """test requires comet_ml""" )(UpperCamelCase__ ) _lowerCamelCase = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Any ): return unittest.skipUnless( _atleast_one_tracker_available , """test requires at least one tracker to be available and for `comet_ml` to not be installed""" , )(UpperCamelCase__ ) class UpperCamelCase_ ( unittest.TestCase ): lowerCamelCase_ = True @classmethod def _snake_case ( cls :int ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp() @classmethod def _snake_case ( cls :Optional[int] ) -> Tuple: """simple docstring""" if os.path.exists(cls.tmpdir ): shutil.rmtree(cls.tmpdir ) def _snake_case ( self :Optional[int] ) -> Optional[int]: """simple docstring""" if self.clear_on_setup: for path in Path(self.tmpdir ).glob("""**/*""" ): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(__A ) class UpperCamelCase_ ( unittest.TestCase ): def _snake_case ( self :str ) -> Optional[Any]: """simple docstring""" super().tearDown() # Reset the state of the AcceleratorState singleton. 
AcceleratorState._reset_state() PartialState._reset_state() class UpperCamelCase_ ( unittest.TestCase ): def _snake_case ( self :Dict , __A :Union[mock.Mock, List[mock.Mock]] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ = mocks if isinstance(__A , (tuple, list) ) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int ): SCREAMING_SNAKE_CASE__ = AcceleratorState() SCREAMING_SNAKE_CASE__ = tensor[None].clone().to(state.device ) SCREAMING_SNAKE_CASE__ = gather(UpperCamelCase__ ).cpu() SCREAMING_SNAKE_CASE__ = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , UpperCamelCase__ ): return False return True class UpperCamelCase_ : def __init__( self :Any , __A :Optional[Any] , __A :int , __A :Tuple ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ = returncode SCREAMING_SNAKE_CASE__ = stdout SCREAMING_SNAKE_CASE__ = stderr async def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: int ): while True: SCREAMING_SNAKE_CASE__ = await stream.readline() if line: callback(UpperCamelCase__ ) else: break async def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Tuple=None , UpperCamelCase__: Optional[Any]=None , UpperCamelCase__: str=None , UpperCamelCase__: Tuple=False , UpperCamelCase__: Union[str, Any]=False ): if echo: print("""\nRunning: """ , """ """.join(UpperCamelCase__ ) ) SCREAMING_SNAKE_CASE__ = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=UpperCamelCase__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=UpperCamelCase__ , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = [] def tee(UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple , UpperCamelCase__: Dict , UpperCamelCase__: str="" ): SCREAMING_SNAKE_CASE__ = line.decode("""utf-8""" ).rstrip() sink.append(UpperCamelCase__ ) if not quiet: print(UpperCamelCase__ , UpperCamelCase__ , file=UpperCamelCase__ ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda UpperCamelCase__ : tee(UpperCamelCase__ , UpperCamelCase__ , sys.stdout , label="""stdout:""" ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda UpperCamelCase__ : tee(UpperCamelCase__ , UpperCamelCase__ , sys.stderr , label="""stderr:""" ) ) ), ] , timeout=UpperCamelCase__ , ) return _RunOutput(await p.wait() , UpperCamelCase__ , UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[Any]=None , UpperCamelCase__: List[Any]=None , UpperCamelCase__: Union[str, Any]=180 , UpperCamelCase__: Optional[int]=False , UpperCamelCase__: str=True ): SCREAMING_SNAKE_CASE__ = asyncio.get_event_loop() SCREAMING_SNAKE_CASE__ = loop.run_until_complete( _stream_subprocess(UpperCamelCase__ , env=UpperCamelCase__ , stdin=UpperCamelCase__ , timeout=UpperCamelCase__ , quiet=UpperCamelCase__ , echo=UpperCamelCase__ ) ) SCREAMING_SNAKE_CASE__ = """ """.join(UpperCamelCase__ ) if result.returncode > 0: SCREAMING_SNAKE_CASE__ = """\n""".join(result.stderr ) raise RuntimeError( f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' f'''The combined stderr from workers follows:\n{stderr}''' ) return result class UpperCamelCase_ ( UpperCamelCase__ ): pass def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] , UpperCamelCase__: List[Any]=False ): try: SCREAMING_SNAKE_CASE__ = subprocess.check_output(UpperCamelCase__ , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(UpperCamelCase__ , """decode""" ): SCREAMING_SNAKE_CASE__ = output.decode("""utf-8""" ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( f'''Command `{' '.join(UpperCamelCase__ )}` failed with the following error:\n\n{e.output.decode()}''' ) from e
6
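# The testing utilities above gate slow and hardware-specific tests on
# environment flags and `unittest.skipUnless`. A minimal sketch of that
# pattern, assuming only the standard library (the helper names here are
# illustrative, not the module's public API):
import os
import unittest
from distutils.util import strtobool


def parse_flag_from_env(key: str, default: bool = False) -> bool:
    # Missing key -> default; otherwise accept yes/no style values.
    value = os.environ.get(key)
    if value is None:
        return default
    try:
        return bool(strtobool(value))
    except ValueError:
        raise ValueError(f"If set, {key} must be yes or no.")


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)


def slow(test_case):
    # Decorator: the test runs only when RUN_SLOW=yes is exported.
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)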
'''simple docstring'''
import builtins
import sys

from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP


SCREAMING_SNAKE_CASE_: Any =False

try:
    SCREAMING_SNAKE_CASE_: Optional[Any] =_is_package_available('google.colab')
except ModuleNotFoundError:
    pass


@input.register
class __A :
    def __init__(self : int , __a : str = None , __a : list = [] ):
        UpperCAmelCase_ = 0
        UpperCAmelCase_ = choices
        UpperCAmelCase_ = prompt
        if sys.platform == "win32":
            UpperCAmelCase_ = "*"
        else:
            UpperCAmelCase_ = "➔ "

    def _lowercase (self : Union[str, Any] , __a : Optional[int] , __a : str = "" ):
        if sys.platform != "win32":
            writeColor(self.choices[index] , 32 , __a )
        else:
            forceWrite(self.choices[index] , __a )

    def _lowercase (self : Any , __a : int ):
        if index == self.position:
            forceWrite(f""" {self.arrow_char} """ )
            self.write_choice(__a )
        else:
            forceWrite(f""" {self.choices[index]}""" )
        reset_cursor()

    def _lowercase (self : Optional[Any] , __a : Direction , __a : int = 1 ):
        UpperCAmelCase_ = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices ):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(__a )
        move_cursor(__a , direction.name )
        self.print_choice(self.position )

    @input.mark(KEYMAP["up"] )
    def _lowercase (self : Dict ):
        self.move_direction(Direction.UP )

    @input.mark(KEYMAP["down"] )
    def _lowercase (self : Any ):
        self.move_direction(Direction.DOWN )

    @input.mark(KEYMAP["newline"] )
    def _lowercase (self : Optional[Any] ):
        move_cursor(len(self.choices ) - self.position , "DOWN" )
        return self.position

    @input.mark(KEYMAP["interrupt"] )
    def _lowercase (self : str ):
        move_cursor(len(self.choices ) - self.position , "DOWN" )
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(__a )] for number in range(10 )] )
    def _lowercase (self : Union[str, Any] ):
        UpperCAmelCase_ = int(chr(self.current_selection ) )
        UpperCAmelCase_ = index - self.position
        if index == self.position:
            return
        if index < len(self.choices ):
            if self.position > index:
                self.move_direction(Direction.UP , -movement )
            elif self.position < index:
                self.move_direction(Direction.DOWN , __a )
            else:
                return
        else:
            return

    def _lowercase (self : Optional[Any] , __a : int = 0 ):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt , "\n" )
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
        UpperCAmelCase_ = default_choice
        for i in range(len(self.choices ) ):
            self.print_choice(__a )
            forceWrite("\n" )
        move_cursor(len(self.choices ) - self.position , "UP" )
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        UpperCAmelCase_ = int(builtins.input() )
                    except ValueError:
                        UpperCAmelCase_ = default_choice
                else:
                    UpperCAmelCase_ = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices ) + 1 ):
                        move_cursor(1 , "UP" )
                        clear_line()
                    self.write_choice(__a , "\n" )
                    return choice
78
0
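# The menu class above moves a highlighted row with the arrow keys and
# clamps the position to the list bounds. A small, self-contained sketch of
# just that movement logic (hypothetical class, no terminal handling):
class MenuPosition:
    def __init__(self, choices: list):
        self.choices = choices
        self.position = 0

    def move(self, delta: int) -> int:
        # Ignore moves that would leave the list, as the menu above does.
        new_position = self.position + delta
        if 0 <= new_position < len(self.choices):
            self.position = new_position
        return self.position


menu = MenuPosition(["train", "eval", "quit"])
assert menu.move(+1) == 1  # one row down
assert menu.move(-5) == 1  # out-of-range move is ignored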
"""simple docstring""" import argparse import os import sys from unittest.mock import patch import pytorch_lightning as pl import timeout_decorator import torch from distillation import SummarizationDistiller, distill_main from finetune import SummarizationModule, main from transformers import MarianMTModel from transformers.file_utils import cached_path from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow from utils import load_json a = '''sshleifer/mar_enro_6_3_student''' class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def lowerCAmelCase_ ( self : int ): super().setUp() _A = cached_path( 'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' , extract_compressed_file=_UpperCAmelCase , ) _A = F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k''' @slow @require_torch_gpu def lowerCAmelCase_ ( self : Optional[Any] ): MarianMTModel.from_pretrained(_UpperCAmelCase ) @slow @require_torch_gpu def lowerCAmelCase_ ( self : Tuple ): _A = { '$MAX_LEN': 64, '$BS': 64, '$GAS': 1, '$ENRO_DIR': self.data_dir, 'facebook/mbart-large-cc25': MARIAN_MODEL, # "val_check_interval=0.25": "val_check_interval=1.0", '--learning_rate=3e-5': '--learning_rate 3e-4', '--num_train_epochs 6': '--num_train_epochs 1', } # Clean up bash script _A = (self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip() _A = bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' ) for k, v in env_vars_to_replace.items(): _A = bash_script.replace(_UpperCAmelCase , str(_UpperCAmelCase ) ) _A = self.get_auto_remove_tmp_dir() # bash_script = bash_script.replace("--fp16 ", "") _A = F''' --output_dir {output_dir} --tokenizer_name Helsinki-NLP/opus-mt-en-ro --sortish_sampler --do_predict --gpus 1 --freeze_encoder --n_train 40000 --n_val 500 --n_test 500 --fp16_opt_level O1 --num_sanity_val_steps 0 --eval_beams 2 '''.split() # XXX: args.gpus > 1 : handle multi_gpu in the future _A = ['finetune.py'] + bash_script.split() + args with patch.object(_UpperCAmelCase , 'argv' , _UpperCAmelCase ): _A = argparse.ArgumentParser() _A = pl.Trainer.add_argparse_args(_UpperCAmelCase ) _A = SummarizationModule.add_model_specific_args(_UpperCAmelCase , os.getcwd() ) _A = parser.parse_args() _A = main(_UpperCAmelCase ) # Check metrics _A = load_json(model.metrics_save_path ) _A = metrics['val'][0] _A = metrics['val'][-1] self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) ) assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , _UpperCAmelCase ) self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.01 ) # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?) self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 ) # test learning requirements: # 1. BLEU improves over the course of training by more than 2 pts self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 ) # 2. BLEU finishes above 17 self.assertGreater(last_step_stats['val_avg_bleu'] , 17 ) # 3. test BLEU and val BLEU within ~1.1 pt. 
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 ) # check lightning ckpt can be loaded and has a reasonable statedict _A = os.listdir(_UpperCAmelCase ) _A = [x for x in contents if x.endswith('.ckpt' )][0] _A = os.path.join(args.output_dir , _UpperCAmelCase ) _A = torch.load(_UpperCAmelCase , map_location='cpu' ) _A = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight' assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: _A = {os.path.basename(_UpperCAmelCase ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics['test'] ) == 1 class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' @timeout_decorator.timeout(600 ) @slow @require_torch_gpu def lowerCAmelCase_ ( self : Dict ): _A = F'''{self.test_file_dir_str}/test_data/wmt_en_ro''' _A = { '--fp16_opt_level=O1': '', '$MAX_LEN': 128, '$BS': 16, '$GAS': 1, '$ENRO_DIR': data_dir, '$m': 'sshleifer/student_marian_en_ro_6_1', 'val_check_interval=0.25': 'val_check_interval=1.0', } # Clean up bash script _A = ( (self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip() ) _A = bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' ) _A = bash_script.replace('--fp16 ' , ' ' ) for k, v in env_vars_to_replace.items(): _A = bash_script.replace(_UpperCAmelCase , str(_UpperCAmelCase ) ) _A = self.get_auto_remove_tmp_dir() _A = bash_script.replace('--fp16' , '' ) _A = 6 _A = ( ['distillation.py'] + bash_script.split() + [ F'''--output_dir={output_dir}''', '--gpus=1', '--learning_rate=1e-3', F'''--num_train_epochs={epochs}''', '--warmup_steps=10', '--val_check_interval=1.0', '--do_predict', ] ) with patch.object(_UpperCAmelCase , 'argv' , _UpperCAmelCase ): _A = argparse.ArgumentParser() _A = pl.Trainer.add_argparse_args(_UpperCAmelCase ) _A = SummarizationDistiller.add_model_specific_args(_UpperCAmelCase , os.getcwd() ) _A = parser.parse_args() # assert args.gpus == gpus THIS BREAKS for multi_gpu _A = distill_main(_UpperCAmelCase ) # Check metrics _A = load_json(model.metrics_save_path ) _A = metrics['val'][0] _A = metrics['val'][-1] assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check assert last_step_stats["val_avg_gen_time"] >= 0.01 assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved. assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , _UpperCAmelCase ) # check lightning ckpt can be loaded and has a reasonable statedict _A = os.listdir(_UpperCAmelCase ) _A = [x for x in contents if x.endswith('.ckpt' )][0] _A = os.path.join(args.output_dir , _UpperCAmelCase ) _A = torch.load(_UpperCAmelCase , map_location='cpu' ) _A = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight' assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. 
if args.do_predict: _A = {os.path.basename(_UpperCAmelCase ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics['test'] ) == 1
7
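# The fine-tuning and distillation tests above launch real training entry
# points by patching `sys.argv` with `mock.patch.object`. A compact sketch
# of the same technique against a stand-in main() (names hypothetical):
import argparse
import sys
from unittest.mock import patch


def main() -> int:
    parser = argparse.ArgumentParser()
    parser.add_argument("--num_train_epochs", type=int, default=1)
    args = parser.parse_args(sys.argv[1:])
    return args.num_train_epochs


testargs = ["finetune.py", "--num_train_epochs", "6"]
with patch.object(sys, "argv", testargs):
    # main() sees the fake command line for the duration of the block.
    assert main() == 6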
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_torch_available,
    is_vision_available,
)


SCREAMING_SNAKE_CASE_: Optional[int] ={'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE_: Optional[int] =['BeitFeatureExtractor']
    SCREAMING_SNAKE_CASE_: int =['BeitImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE_: Optional[int] =[
        'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BeitForImageClassification',
        'BeitForMaskedImageModeling',
        'BeitForSemanticSegmentation',
        'BeitModel',
        'BeitPreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE_: int =[
        'FlaxBeitForImageClassification',
        'FlaxBeitForMaskedImageModeling',
        'FlaxBeitModel',
        'FlaxBeitPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    SCREAMING_SNAKE_CASE_: Dict =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
78
0
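# The `__init__` above defers every heavy submodule import behind a
# `_LazyModule` built from an import-structure dict. A self-contained sketch
# of that lazy-attribute idea using importlib (this is an illustration, not
# the transformers implementation):
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Invert {module: [attributes]} into {attribute: module}.
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__} has no attribute {attr}")
        module = importlib.import_module(self._name_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ runs only once
        return value


# json.loads is imported only when the attribute is first touched.
lazy = LazyModule("demo", {"json": ["loads"]})
assert lazy.loads("[1, 2]") == [1, 2]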
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


lowercase__ : int = logging.get_logger(__name__)

lowercase__ : Union[str, Any] = {
    '''microsoft/table-transformer-detection''': (
        '''https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'''
    ),
}


class SCREAMING_SNAKE_CASE (a__ ):
    lowerCAmelCase = '''table-transformer'''
    lowerCAmelCase = ['''past_key_values''']
    lowerCAmelCase = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }

    def __init__(
        self ,
        _UpperCAmelCase=True ,
        _UpperCAmelCase=None ,
        _UpperCAmelCase=3 ,
        _UpperCAmelCase=100 ,
        _UpperCAmelCase=6 ,
        _UpperCAmelCase=2048 ,
        _UpperCAmelCase=8 ,
        _UpperCAmelCase=6 ,
        _UpperCAmelCase=2048 ,
        _UpperCAmelCase=8 ,
        _UpperCAmelCase=0.0 ,
        _UpperCAmelCase=0.0 ,
        _UpperCAmelCase=True ,
        _UpperCAmelCase="relu" ,
        _UpperCAmelCase=256 ,
        _UpperCAmelCase=0.1 ,
        _UpperCAmelCase=0.0 ,
        _UpperCAmelCase=0.0 ,
        _UpperCAmelCase=0.02 ,
        _UpperCAmelCase=1.0 ,
        _UpperCAmelCase=False ,
        _UpperCAmelCase="sine" ,
        _UpperCAmelCase="resnet50" ,
        _UpperCAmelCase=True ,
        _UpperCAmelCase=False ,
        _UpperCAmelCase=1 ,
        _UpperCAmelCase=5 ,
        _UpperCAmelCase=2 ,
        _UpperCAmelCase=1 ,
        _UpperCAmelCase=1 ,
        _UpperCAmelCase=5 ,
        _UpperCAmelCase=2 ,
        _UpperCAmelCase=0.1 ,
        **_UpperCAmelCase ,
    ):
        '''simple docstring'''
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                __A : int = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(_UpperCAmelCase , _UpperCAmelCase):
                __A : Any = backbone_config.get('model_type')
                __A : int = CONFIG_MAPPING[backbone_model_type]
                __A : str = config_class.from_dict(_UpperCAmelCase)
            # set timm attributes to None
            __A , __A , __A : Optional[Any] = None, None, None
        __A : str = use_timm_backbone
        __A : Optional[Any] = backbone_config
        __A : Dict = num_channels
        __A : Optional[int] = num_queries
        __A : Optional[int] = d_model
        __A : Tuple = encoder_ffn_dim
        __A : str = encoder_layers
        __A : Optional[int] = encoder_attention_heads
        __A : Dict = decoder_ffn_dim
        __A : Tuple = decoder_layers
        __A : Union[str, Any] = decoder_attention_heads
        __A : Tuple = dropout
        __A : Optional[int] = attention_dropout
        __A : Union[str, Any] = activation_dropout
        __A : Optional[int] = activation_function
        __A : Optional[Any] = init_std
        __A : Tuple = init_xavier_std
        __A : Tuple = encoder_layerdrop
        __A : List[str] = decoder_layerdrop
        __A : Optional[Any] = encoder_layers
        __A : Optional[int] = auxiliary_loss
        __A : Any = position_embedding_type
        __A : Optional[int] = backbone
        __A : Any = use_pretrained_backbone
        __A : Tuple = dilation
        # Hungarian matcher
        __A : int = class_cost
        __A : List[Any] = bbox_cost
        __A : Any = giou_cost
        # Loss coefficients
        __A : Optional[int] = mask_loss_coefficient
        __A : Dict = dice_loss_coefficient
        __A : Dict = bbox_loss_coefficient
        __A : int = giou_loss_coefficient
        __A : int = eos_coefficient
        super().__init__(is_encoder_decoder=_UpperCAmelCase , **_UpperCAmelCase)

    @property
    def SCREAMING_SNAKE_CASE ( self):
        '''simple docstring'''
        return self.encoder_attention_heads

    @property
    def SCREAMING_SNAKE_CASE ( self):
        '''simple docstring'''
        return self.d_model


class SCREAMING_SNAKE_CASE (a__ ):
    lowerCAmelCase = version.parse('''1.11''' )

    @property
    def SCREAMING_SNAKE_CASE ( self):
        '''simple docstring'''
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('pixel_mask', {0: 'batch'}),
            ])

    @property
    def SCREAMING_SNAKE_CASE ( self):
        '''simple docstring'''
        return 1e-5

    @property
    def SCREAMING_SNAKE_CASE ( self):
        '''simple docstring'''
        return 12
8
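# The config above exposes canonical names like `hidden_size` by routing
# them through an attribute map onto model-specific fields such as
# `d_model`. A minimal sketch of that aliasing outside transformers
# (class and field names here are illustrative):
class AliasedConfig:
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, d_model: int = 256, encoder_attention_heads: int = 8):
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads

    def __getattr__(self, name: str):
        # Called only when normal lookup fails, so aliases resolve here.
        alias = type(self).attribute_map.get(name)
        if alias is not None:
            return getattr(self, alias)
        raise AttributeError(name)


config = AliasedConfig(d_model=512)
assert config.hidden_size == 512  # resolved through the attribute map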
'''simple docstring''' import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed SCREAMING_SNAKE_CASE_: Any ={ 'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), 'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), 'bert': (BertConfig, BertForMaskedLM, BertTokenizer), 'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def lowerCAmelCase_ ( snake_case_ : Any ) -> str: '''simple docstring''' assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' if args.student_type == "roberta": UpperCAmelCase_ = False elif args.student_type == "gpt2": UpperCAmelCase_ = False def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : List[Any] ) -> Tuple: '''simple docstring''' if args.student_type == "roberta": UpperCAmelCase_ = False def lowerCAmelCase_ ( ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ = argparse.ArgumentParser(description="Training" ) parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." ) parser.add_argument( "--dump_path" , type=snake_case_ , required=snake_case_ , help="The output directory (log, checkpoints, parameters, etc.)" ) parser.add_argument( "--data_file" , type=snake_case_ , required=snake_case_ , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , ) parser.add_argument( "--student_type" , type=snake_case_ , choices=["distilbert", "roberta", "gpt2"] , required=snake_case_ , help="The student type (DistilBERT, RoBERTa)." , ) parser.add_argument("--student_config" , type=snake_case_ , required=snake_case_ , help="Path to the student configuration." ) parser.add_argument( "--student_pretrained_weights" , default=snake_case_ , type=snake_case_ , help="Load student initialization checkpoint." ) parser.add_argument( "--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=snake_case_ , help="Teacher type (BERT, RoBERTa)." ) parser.add_argument("--teacher_name" , type=snake_case_ , required=snake_case_ , help="The teacher model." 
) parser.add_argument("--temperature" , default=2.0 , type=snake_case_ , help="Temperature for the softmax temperature." ) parser.add_argument( "--alpha_ce" , default=0.5 , type=snake_case_ , help="Linear weight for the distillation loss. Must be >=0." ) parser.add_argument( "--alpha_mlm" , default=0.0 , type=snake_case_ , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , ) parser.add_argument("--alpha_clm" , default=0.5 , type=snake_case_ , help="Linear weight for the CLM loss. Must be >=0." ) parser.add_argument("--alpha_mse" , default=0.0 , type=snake_case_ , help="Linear weight of the MSE loss. Must be >=0." ) parser.add_argument( "--alpha_cos" , default=0.0 , type=snake_case_ , help="Linear weight of the cosine embedding loss. Must be >=0." ) parser.add_argument( "--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." ) parser.add_argument( "--mlm_mask_prop" , default=0.15 , type=snake_case_ , help="Proportion of tokens for which we need to make a prediction." , ) parser.add_argument("--word_mask" , default=0.8 , type=snake_case_ , help="Proportion of tokens to mask out." ) parser.add_argument("--word_keep" , default=0.1 , type=snake_case_ , help="Proportion of tokens to keep." ) parser.add_argument("--word_rand" , default=0.1 , type=snake_case_ , help="Proportion of tokens to randomly replace." ) parser.add_argument( "--mlm_smoothing" , default=0.7 , type=snake_case_ , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , ) parser.add_argument("--token_counts" , type=snake_case_ , help="The token counts in the data_file for MLM." ) parser.add_argument( "--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , ) parser.add_argument( "--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , ) parser.add_argument( "--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , ) parser.add_argument("--n_epoch" , type=snake_case_ , default=3 , help="Number of pass on the whole dataset." ) parser.add_argument("--batch_size" , type=snake_case_ , default=5 , help="Batch size (for each process)." ) parser.add_argument( "--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , ) parser.add_argument( "--gradient_accumulation_steps" , type=snake_case_ , default=50 , help="Gradient accumulation for larger training batches." , ) parser.add_argument("--warmup_prop" , default=0.05 , type=snake_case_ , help="Linear warmup proportion." ) parser.add_argument("--weight_decay" , default=0.0 , type=snake_case_ , help="Weight decay if we apply some." ) parser.add_argument("--learning_rate" , default=5E-4 , type=snake_case_ , help="The initial learning rate for Adam." ) parser.add_argument("--adam_epsilon" , default=1E-6 , type=snake_case_ , help="Epsilon for Adam optimizer." ) parser.add_argument("--max_grad_norm" , default=5.0 , type=snake_case_ , help="Max gradient norm." ) parser.add_argument("--initializer_range" , default=0.02 , type=snake_case_ , help="Random initialization range." 
) parser.add_argument( "--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , ) parser.add_argument( "--fp16_opt_level" , type=snake_case_ , default="O1" , help=( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html" ) , ) parser.add_argument("--n_gpu" , type=snake_case_ , default=1 , help="Number of GPUs in the node." ) parser.add_argument("--local_rank" , type=snake_case_ , default=-1 , help="Distributed training - Local rank" ) parser.add_argument("--seed" , type=snake_case_ , default=56 , help="Random seed" ) parser.add_argument("--log_interval" , type=snake_case_ , default=5_00 , help="Tensorboard logging interval." ) parser.add_argument("--checkpoint_interval" , type=snake_case_ , default=40_00 , help="Checkpoint interval." ) UpperCAmelCase_ = parser.parse_args() sanity_checks(snake_case_ ) # ARGS # init_gpu_params(snake_case_ ) set_seed(snake_case_ ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite""" " itUse `--force` if you want to overwrite it" ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" ) # SAVE PARAMS # logger.info(f"""Param: {args}""" ) with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f: json.dump(vars(snake_case_ ) , snake_case_ , indent=4 ) git_log(args.dump_path ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = MODEL_CLASSES[args.student_type] UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = MODEL_CLASSES[args.teacher_type] # TOKENIZER # UpperCAmelCase_ = teacher_tokenizer_class.from_pretrained(args.teacher_name ) UpperCAmelCase_ = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): UpperCAmelCase_ = tokenizer.all_special_tokens.index(snake_case_ ) UpperCAmelCase_ = tokenizer.all_special_ids[idx] logger.info(f"""Special tokens {special_tok_ids}""" ) UpperCAmelCase_ = special_tok_ids UpperCAmelCase_ = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f"""Loading data from {args.data_file}""" ) with open(args.data_file , "rb" ) as fp: UpperCAmelCase_ = pickle.load(snake_case_ ) if args.mlm: logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" ) with open(args.token_counts , "rb" ) as fp: UpperCAmelCase_ = pickle.load(snake_case_ ) UpperCAmelCase_ = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): UpperCAmelCase_ = 0.0 # do not predict special tokens UpperCAmelCase_ = torch.from_numpy(snake_case_ ) else: UpperCAmelCase_ = None UpperCAmelCase_ = LmSeqsDataset(params=snake_case_ , data=snake_case_ ) logger.info("Data loader created." ) # STUDENT # logger.info(f"""Loading student config from {args.student_config}""" ) UpperCAmelCase_ = student_config_class.from_pretrained(args.student_config ) UpperCAmelCase_ = True if args.student_pretrained_weights is not None: logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" ) UpperCAmelCase_ = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case_ ) else: UpperCAmelCase_ = student_model_class(snake_case_ ) if args.n_gpu > 0: student.to(f"""cuda:{args.local_rank}""" ) logger.info("Student loaded." 
) # TEACHER # UpperCAmelCase_ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case_ ) if args.n_gpu > 0: teacher.to(f"""cuda:{args.local_rank}""" ) logger.info(f"""Teacher loaded from {args.teacher_name}.""" ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(snake_case_ , snake_case_ ) if args.freeze_token_type_embds: freeze_token_type_embeddings(snake_case_ , snake_case_ ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() UpperCAmelCase_ = Distiller( params=snake_case_ , dataset=snake_case_ , token_probs=snake_case_ , student=snake_case_ , teacher=snake_case_ ) distiller.train() logger.info("Let's go get some drinks." ) if __name__ == "__main__": main()
78
0
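# The training script above validates its loss weights with a block of
# asserts before building anything. The same invariants as a sketch with
# explicit errors (argument names follow the script, the rest is assumed):
import argparse


def sanity_checks(args: argparse.Namespace) -> None:
    weights = [args.alpha_ce, args.alpha_mlm, args.alpha_clm, args.alpha_mse, args.alpha_cos]
    if any(w < 0.0 for w in weights):
        raise ValueError("every loss weight must be >= 0")
    if sum(weights) <= 0.0:
        raise ValueError("at least one loss weight must be > 0")


parser = argparse.ArgumentParser()
for name, default in [("alpha_ce", 0.5), ("alpha_mlm", 0.0), ("alpha_clm", 0.5),
                      ("alpha_mse", 0.0), ("alpha_cos", 0.0)]:
    parser.add_argument(f"--{name}", type=float, default=default)
sanity_checks(parser.parse_args([]))  # the defaults satisfy both checks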
import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" A__ : int = ["input_features"] def __init__( self : Dict , _snake_case : str=80 , _snake_case : List[Any]=1_60_00 , _snake_case : str=1_60 , _snake_case : Any=30 , _snake_case : Dict=4_00 , _snake_case : int=0.0 , _snake_case : Tuple=False , **_snake_case : Optional[Any] , ): """simple docstring""" super().__init__( feature_size=_snake_case , sampling_rate=_snake_case , padding_value=_snake_case , return_attention_mask=_snake_case , **_snake_case , ) A__ = n_fft A__ = hop_length A__ = chunk_length A__ = chunk_length * sampling_rate A__ = self.n_samples // hop_length A__ = sampling_rate A__ = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_snake_case , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=_snake_case , norm='slaney' , mel_scale='slaney' , ) def _a ( self : List[str] , _snake_case : np.array ): """simple docstring""" A__ = spectrogram( _snake_case , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='log10' , ) A__ = log_spec[:, :-1] A__ = np.maximum(_snake_case , log_spec.max() - 8.0 ) A__ = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def _a ( _snake_case : List[np.ndarray] , _snake_case : List[np.ndarray] , _snake_case : float = 0.0 ): """simple docstring""" if attention_mask is not None: A__ = np.array(_snake_case , np.intaa ) A__ = [] for vector, length in zip(_snake_case , attention_mask.sum(-1 ) ): A__ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: A__ = padding_value normed_input_values.append(_snake_case ) else: A__ = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def __call__( self : str , _snake_case : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _snake_case : bool = True , _snake_case : Optional[int] = None , _snake_case : Optional[Union[str, TensorType]] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[str] = "max_length" , _snake_case : Optional[int] = None , _snake_case : Optional[int] = None , _snake_case : Optional[bool] = None , **_snake_case : Optional[int] , ): """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) A__ = isinstance(_snake_case , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) A__ = is_batched_numpy or ( isinstance(_snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: A__ = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(_snake_case , np.ndarray ): A__ = np.asarray(_snake_case , dtype=np.floataa ) elif isinstance(_snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A__ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A__ = [np.asarray([raw_speech] ).T] A__ = BatchFeature({'input_features': raw_speech} ) # convert into correct format for padding A__ = self.pad( _snake_case , padding=_snake_case , max_length=max_length if max_length else self.n_samples , truncation=_snake_case , pad_to_multiple_of=_snake_case , return_attention_mask=return_attention_mask or do_normalize , ) # zero-mean and unit-variance normalization if do_normalize: A__ = self.zero_mean_unit_var_norm( padded_inputs['input_features'] , attention_mask=padded_inputs['attention_mask'] , padding_value=self.padding_value , ) A__ = np.stack(padded_inputs['input_features'] , axis=0 ) # make sure list is in array format A__ = padded_inputs.get('input_features' ).transpose(2 , 0 , 1 ) A__ = [self._np_extract_fbank_features(_snake_case ) for waveform in input_features[0]] if isinstance(input_features[0] , _snake_case ): A__ = [np.asarray(_snake_case , dtype=np.floataa ) for feature in input_features] else: A__ = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) A__ = padded_inputs['attention_mask'][:, :: self.hop_length] if return_tensors is not None: A__ = padded_inputs.convert_to_tensors(_snake_case ) return padded_inputs def _a ( self : Optional[Any] ): """simple docstring""" A__ = copy.deepcopy(self.__dict__ ) A__ = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
9
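# After the mel filter bank, the feature extractor above compresses the
# dynamic range: log10, clamp to 8 below the per-clip maximum, then rescale
# to roughly [-1, 1]. A numpy sketch of that normalization step (the 1e-10
# floor is an assumption to keep log10 finite):
import numpy as np


def normalize_log_mel(power_spec: np.ndarray) -> np.ndarray:
    log_spec = np.log10(np.maximum(power_spec, 1e-10))
    log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
    return (log_spec + 4.0) / 4.0


spec = np.random.rand(80, 3000).astype(np.float32)  # (mel bins, frames)
out = normalize_log_mel(spec)
assert out.shape == spec.shape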
'''simple docstring''' import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class __A ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): a__ : int = AutoencoderKL a__ : Optional[Any] = """sample""" a__ : Union[str, Any] = 1e-2 @property def _lowercase (self : Optional[int] ): UpperCAmelCase_ = 4 UpperCAmelCase_ = 3 UpperCAmelCase_ = (32, 32) UpperCAmelCase_ = floats_tensor((batch_size, num_channels) + sizes ).to(__a ) return {"sample": image} @property def _lowercase (self : Any ): return (3, 32, 32) @property def _lowercase (self : Dict ): return (3, 32, 32) def _lowercase (self : int ): UpperCAmelCase_ = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } UpperCAmelCase_ = self.dummy_input return init_dict, inputs_dict def _lowercase (self : int ): pass def _lowercase (self : int ): pass @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" ) def _lowercase (self : List[Any] ): # enable deterministic behavior for gradient checkpointing UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_init_args_and_inputs_for_common() UpperCAmelCase_ = self.model_class(**__a ) model.to(__a ) assert not model.is_gradient_checkpointing and model.training UpperCAmelCase_ = model(**__a ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() UpperCAmelCase_ = torch.randn_like(__a ) UpperCAmelCase_ = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing UpperCAmelCase_ = self.model_class(**__a ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(__a ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training UpperCAmelCase_ = model_a(**__a ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() UpperCAmelCase_ = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1E-5 ) UpperCAmelCase_ = dict(model.named_parameters() ) UpperCAmelCase_ = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) ) def _lowercase (self : Any ): UpperCAmelCase_ , UpperCAmelCase_ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=__a ) self.assertIsNotNone(__a ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(__a ) UpperCAmelCase_ = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def _lowercase (self : List[str] ): UpperCAmelCase_ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" ) UpperCAmelCase_ = model.to(__a ) model.eval() if torch_device == "mps": UpperCAmelCase_ = torch.manual_seed(0 ) else: UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 ) UpperCAmelCase_ = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) UpperCAmelCase_ = image.to(__a ) with torch.no_grad(): UpperCAmelCase_ = model(__a , sample_posterior=__a , generator=__a ).sample UpperCAmelCase_ = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": UpperCAmelCase_ = torch.tensor( [ -4.0078E-01, -3.8323E-04, -1.2681E-01, -1.1462E-01, 2.0095E-01, 1.0893E-01, -8.8247E-02, -3.0361E-01, -9.8644E-03, ] ) elif torch_device == "cpu": UpperCAmelCase_ = torch.tensor( [-0.13_52, 0.08_78, 0.04_19, -0.08_18, -0.10_69, 0.06_88, -0.14_58, -0.44_46, -0.00_26] ) else: UpperCAmelCase_ = torch.tensor( [-0.24_21, 0.46_42, 0.25_07, -0.04_38, 0.06_82, 0.31_60, -0.20_18, -0.07_27, 0.24_85] ) self.assertTrue(torch_all_close(__a , __a , rtol=1E-2 ) ) @slow class __A ( unittest.TestCase ): def _lowercase (self : Dict , __a : Dict , __a : int ): return f"""gaussian_noise_s={seed}_shape={"_".join([str(__a ) for s in shape] )}.npy""" def _lowercase (self : str ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase (self : Optional[Any] , __a : Optional[Any]=0 , __a : str=(4, 3, 512, 512) , __a : List[str]=False ): UpperCAmelCase_ = torch.floataa if fpaa else torch.floataa UpperCAmelCase_ = torch.from_numpy(load_hf_numpy(self.get_file_format(__a , __a ) ) ).to(__a ).to(__a ) return image def _lowercase (self : List[Any] , __a : Union[str, Any]="CompVis/stable-diffusion-v1-4" , __a : List[Any]=False ): UpperCAmelCase_ = "fp16" if fpaa else None UpperCAmelCase_ = torch.floataa if fpaa else torch.floataa UpperCAmelCase_ = AutoencoderKL.from_pretrained( __a , subfolder="vae" , torch_dtype=__a , revision=__a , ) model.to(__a ).eval() return model def _lowercase (self : List[Any] , __a : List[Any]=0 ): if torch_device == "mps": return torch.manual_seed(__a ) return torch.Generator(device=__a ).manual_seed(__a ) @parameterized.expand( [ # fmt: off [33, [-0.16_03, 0.98_78, -0.04_95, -0.07_90, -0.27_09, 0.83_75, -0.20_60, -0.08_24], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]], [47, [-0.23_76, 0.11_68, 0.13_32, -0.48_40, -0.25_08, 
-0.07_91, -0.04_93, -0.40_89], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]], # fmt: on ] ) def _lowercase (self : List[Any] , __a : Dict , __a : Optional[int] , __a : List[str] ): UpperCAmelCase_ = self.get_sd_vae_model() UpperCAmelCase_ = self.get_sd_image(__a ) UpperCAmelCase_ = self.get_generator(__a ) with torch.no_grad(): UpperCAmelCase_ = model(__a , generator=__a , sample_posterior=__a ).sample assert sample.shape == image.shape UpperCAmelCase_ = sample[-1, -2:, -2:, :2].flatten().float().cpu() UpperCAmelCase_ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(__a , __a , atol=3E-3 ) @parameterized.expand( [ # fmt: off [33, [-0.05_13, 0.02_89, 1.37_99, 0.21_66, -0.25_73, -0.08_71, 0.51_03, -0.09_99]], [47, [-0.41_28, -0.13_20, -0.37_04, 0.19_65, -0.41_16, -0.23_32, -0.33_40, 0.22_47]], # fmt: on ] ) @require_torch_gpu def _lowercase (self : Dict , __a : Optional[int] , __a : int ): UpperCAmelCase_ = self.get_sd_vae_model(fpaa=__a ) UpperCAmelCase_ = self.get_sd_image(__a , fpaa=__a ) UpperCAmelCase_ = self.get_generator(__a ) with torch.no_grad(): UpperCAmelCase_ = model(__a , generator=__a , sample_posterior=__a ).sample assert sample.shape == image.shape UpperCAmelCase_ = sample[-1, -2:, :2, -2:].flatten().float().cpu() UpperCAmelCase_ = torch.tensor(__a ) assert torch_all_close(__a , __a , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.16_09, 0.98_66, -0.04_87, -0.07_77, -0.27_16, 0.83_68, -0.20_55, -0.08_14], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]], [47, [-0.23_77, 0.11_47, 0.13_33, -0.48_41, -0.25_06, -0.08_05, -0.04_91, -0.40_85], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]], # fmt: on ] ) def _lowercase (self : str , __a : int , __a : Union[str, Any] , __a : List[Any] ): UpperCAmelCase_ = self.get_sd_vae_model() UpperCAmelCase_ = self.get_sd_image(__a ) with torch.no_grad(): UpperCAmelCase_ = model(__a ).sample assert sample.shape == image.shape UpperCAmelCase_ = sample[-1, -2:, -2:, :2].flatten().float().cpu() UpperCAmelCase_ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(__a , __a , atol=3E-3 ) @parameterized.expand( [ # fmt: off [13, [-0.20_51, -0.18_03, -0.23_11, -0.21_14, -0.32_92, -0.35_74, -0.29_53, -0.33_23]], [37, [-0.26_32, -0.26_25, -0.21_99, -0.27_41, -0.45_39, -0.49_90, -0.37_20, -0.49_25]], # fmt: on ] ) @require_torch_gpu def _lowercase (self : int , __a : int , __a : int ): UpperCAmelCase_ = self.get_sd_vae_model() UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) ) with torch.no_grad(): UpperCAmelCase_ = model.decode(__a ).sample assert list(sample.shape ) == [3, 3, 512, 512] UpperCAmelCase_ = sample[-1, -2:, :2, -2:].flatten().cpu() UpperCAmelCase_ = torch.tensor(__a ) assert torch_all_close(__a , __a , atol=1E-3 ) @parameterized.expand( [ # fmt: off [27, [-0.03_69, 0.02_07, -0.07_76, -0.06_82, -0.17_47, -0.19_30, -0.14_65, -0.20_39]], [16, [-0.16_28, -0.21_34, -0.27_47, -0.26_42, -0.37_74, -0.44_04, -0.36_87, -0.42_77]], # fmt: on ] ) @require_torch_gpu def _lowercase (self : Union[str, Any] , __a : List[str] , __a : Optional[Any] ): UpperCAmelCase_ = self.get_sd_vae_model(fpaa=__a ) UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) , fpaa=__a ) with torch.no_grad(): UpperCAmelCase_ = model.decode(__a ).sample assert list(sample.shape ) == [3, 3, 512, 512] UpperCAmelCase_ = sample[-1, -2:, :2, 
-2:].flatten().float().cpu() UpperCAmelCase_ = torch.tensor(__a ) assert torch_all_close(__a , __a , atol=5E-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def _lowercase (self : List[str] , __a : int ): UpperCAmelCase_ = self.get_sd_vae_model(fpaa=__a ) UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) , fpaa=__a ) with torch.no_grad(): UpperCAmelCase_ = model.decode(__a ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): UpperCAmelCase_ = model.decode(__a ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(__a , __a , atol=1E-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def _lowercase (self : Union[str, Any] , __a : Dict ): UpperCAmelCase_ = self.get_sd_vae_model() UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) ) with torch.no_grad(): UpperCAmelCase_ = model.decode(__a ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): UpperCAmelCase_ = model.decode(__a ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(__a , __a , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.30_01, 0.09_18, -2.69_84, -3.97_20, -3.20_99, -5.03_53, 1.73_38, -0.20_65, 3.42_67]], [47, [-1.50_30, -4.38_71, -6.03_55, -9.11_57, -1.66_61, -2.78_53, 2.16_07, -5.08_23, 2.56_33]], # fmt: on ] ) def _lowercase (self : Tuple , __a : List[Any] , __a : List[Any] ): UpperCAmelCase_ = self.get_sd_vae_model() UpperCAmelCase_ = self.get_sd_image(__a ) UpperCAmelCase_ = self.get_generator(__a ) with torch.no_grad(): UpperCAmelCase_ = model.encode(__a ).latent_dist UpperCAmelCase_ = dist.sample(generator=__a ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] UpperCAmelCase_ = sample[0, -1, -3:, -3:].flatten().cpu() UpperCAmelCase_ = torch.tensor(__a ) UpperCAmelCase_ = 3E-3 if torch_device != "mps" else 1E-2 assert torch_all_close(__a , __a , atol=__a )
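The tests above repeat one pattern: build a device-appropriate seeded generator, run the model, then flatten a fixed slice of the output and compare it against hard-coded reference values. A minimal self-contained sketch of that pattern (the helper name `make_generator` is ours, not from the test suite):

import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    # "mps" is seeded through the default generator; CPU/CUDA take a
    # device-bound torch.Generator, mirroring the branch in the tests above.
    if device == "mps":
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

gen = make_generator("cpu")
sample = torch.randn(1, 4, 8, 8, generator=gen)
# Flatten a fixed slice and compare against a stored reference tensor:
slice_ = sample[0, -1, -3:, -3:].flatten()
assert torch.allclose(slice_, slice_.clone(), rtol=1e-2)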
78
0
import logging import os from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional from tqdm import auto as tqdm_lib _lowerCAmelCase = { "debug": logging.DEBUG, "info": logging.INFO, "warning": logging.WARNING, "error": logging.ERROR, "critical": logging.CRITICAL, } _lowerCAmelCase = logging.WARNING def _snake_case ( ): _UpperCamelCase = os.getenv('''DATASETS_VERBOSITY''' , __snake_case ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f"""Unknown option DATASETS_VERBOSITY={env_level_str}, """ f"""has to be one of: { ", ".join(log_levels.keys() ) }""" ) return _default_log_level def _snake_case ( ): return __name__.split('''.''' )[0] def _snake_case ( ): return logging.getLogger(_get_library_name() ) def _snake_case ( ): # Apply our default configuration to the library root logger. _UpperCamelCase = _get_library_root_logger() library_root_logger.setLevel(_get_default_logging_level() ) def _snake_case ( ): _UpperCamelCase = _get_library_root_logger() library_root_logger.setLevel(logging.NOTSET ) def _snake_case ( __snake_case = None ): if name is None: _UpperCamelCase = _get_library_name() return logging.getLogger(__snake_case ) def _snake_case ( ): return _get_library_root_logger().getEffectiveLevel() def _snake_case ( __snake_case ): _get_library_root_logger().setLevel(__snake_case ) def _snake_case ( ): return set_verbosity(__snake_case ) def _snake_case ( ): return set_verbosity(__snake_case ) def _snake_case ( ): return set_verbosity(__snake_case ) def _snake_case ( ): return set_verbosity(__snake_case ) def _snake_case ( ): _UpperCamelCase = False def _snake_case ( ): _UpperCamelCase = True # Configure the library root logger at the module level (singleton-like) _configure_library_root_logger() class lowerCAmelCase_ : def __init__( self : Any , *_A : Union[str, Any] , **_A : str ): # pylint: disable=unused-argument _UpperCamelCase = args[0] if args else None def __iter__( self : Optional[Any] ): return iter(self._iterator ) def __getattr__( self : Any , _A : Optional[int] ): def empty_fn(*_A : List[Any] , **_A : List[str] ): # pylint: disable=unused-argument return return empty_fn def __enter__( self : str ): return self def __exit__( self : str , _A : List[str] , _A : Dict , _A : str ): return _lowerCAmelCase = True class lowerCAmelCase_ : def __call__( self : int , *_A : Union[str, Any] , _A : Optional[int]=False , **_A : Dict ): if _tqdm_active and not disable: return tqdm_lib.tqdm(*_A , **_A ) else: return EmptyTqdm(*_A , **_A ) def UpperCamelCase_ ( self : int , *_A : str , **_A : Optional[int] ): _UpperCamelCase = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*_A , **_A ) def UpperCamelCase_ ( self : Union[str, Any] ): if _tqdm_active: return tqdm_lib.tqdm.get_lock() _lowerCAmelCase = _tqdm_cls() def _snake_case ( ): global _tqdm_active return bool(_tqdm_active ) def _snake_case ( ): global _tqdm_active _UpperCamelCase = True def _snake_case ( ): global _tqdm_active _UpperCamelCase = False
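The `EmptyTqdm` class in the module above is a stand-in that swallows every method call when progress bars are disabled. A stripped-down sketch of the same trick (class and method names here are illustrative, not the library's API):

class NoOpBar:
    """Accepts any call and silently does nothing, like EmptyTqdm above."""

    def __init__(self, iterable=None, **kwargs):
        self._iterable = iterable

    def __iter__(self):
        return iter(self._iterable if self._iterable is not None else [])

    def __getattr__(self, name):
        # Any unknown attribute resolves to a do-nothing callable.
        return lambda *args, **kwargs: None

    def __enter__(self):
        return self

    def __exit__(self, *exc):
        return False

for _ in NoOpBar(range(3)):
    pass             # iterates normally
NoOpBar().update(1)  # any method call is swallowed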
10
'''simple docstring'''
import logging

from transformers import PretrainedConfig


logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class __A(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
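With the duplicate parameter names and missing base class repaired, the config behaves like any `PretrainedConfig` subclass; a short usage sketch:

cfg = __A(enc_layers=8, dec_layers=8)
print(cfg.model_type)        # "bertabs"
print(cfg.enc_layers)        # 8
print(cfg.to_json_string())  # JSON serialization comes for free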
78
0
'''simple docstring'''


def lowerCAmelCase(num):
    """Convert an integer to its sign-magnitude binary string, e.g. -5 -> '-0b101'."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
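Quick sanity checks for the conversion above:

print(lowerCAmelCase(0))   # 0b0
print(lowerCAmelCase(5))   # 0b101
print(lowerCAmelCase(-5))  # -0b101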
11
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE_: Tuple =logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> int: '''simple docstring''' UpperCAmelCase_ = "huggingface/label-files" UpperCAmelCase_ = "imagenet-1k-id2label.json" UpperCAmelCase_ = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="dataset" ) , "r" ) ) UpperCAmelCase_ = {int(snake_case_ ): v for k, v in idalabel.items()} UpperCAmelCase_ = {v: k for k, v in idalabel.items()} UpperCAmelCase_ = "std_conv" if "bit" in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" UpperCAmelCase_ = BitConfig( conv_layer=snake_case_ , num_labels=10_00 , idalabel=snake_case_ , labelaid=snake_case_ , ) return config def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' if "stem.conv" in name: UpperCAmelCase_ = name.replace("stem.conv" , "bit.embedder.convolution" ) if "blocks" in name: UpperCAmelCase_ = name.replace("blocks" , "layers" ) if "head.fc" in name: UpperCAmelCase_ = name.replace("head.fc" , "classifier.1" ) if name.startswith("norm" ): UpperCAmelCase_ = "bit." + name if "bit" not in name and "classifier" not in name: UpperCAmelCase_ = "bit.encoder." 
+ name return name def lowerCAmelCase_ ( ) -> Dict: '''simple docstring''' UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : int=False ) -> List[Any]: '''simple docstring''' UpperCAmelCase_ = get_config(snake_case_ ) # load original model from timm UpperCAmelCase_ = create_model(snake_case_ , pretrained=snake_case_ ) timm_model.eval() # load state_dict of original model UpperCAmelCase_ = timm_model.state_dict() for key in state_dict.copy().keys(): UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val.squeeze() if "head" in key else val # load HuggingFace model UpperCAmelCase_ = BitForImageClassification(snake_case_ ) model.eval() model.load_state_dict(snake_case_ ) # create image processor UpperCAmelCase_ = create_transform(**resolve_data_config({} , model=snake_case_ ) ) UpperCAmelCase_ = transform.transforms UpperCAmelCase_ = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } UpperCAmelCase_ = BitImageProcessor( do_resize=snake_case_ , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=snake_case_ , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=snake_case_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) UpperCAmelCase_ = prepare_img() UpperCAmelCase_ = transform(snake_case_ ).unsqueeze(0 ) UpperCAmelCase_ = processor(snake_case_ , return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(snake_case_ , snake_case_ ) # verify logits with torch.no_grad(): UpperCAmelCase_ = model(snake_case_ ) UpperCAmelCase_ = outputs.logits print("Logits:" , logits[0, :3] ) print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] ) UpperCAmelCase_ = timm_model(snake_case_ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(snake_case_ , outputs.logits , atol=1E-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" ) model.save_pretrained(snake_case_ ) processor.save_pretrained(snake_case_ ) if push_to_hub: print(f"""Pushing model {model_name} and processor to the hub""" ) model.push_to_hub(f"""ybelkada/{model_name}""" ) processor.push_to_hub(f"""ybelkada/{model_name}""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_: int =argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='resnetv2_50x1_bitm', type=str, help='Name of the BiT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model to the hub.', ) SCREAMING_SNAKE_CASE_: Union[str, Any] =parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
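Conversion scripts like the one above follow a common recipe: pop every key out of the source state dict, map it through a rename function, and reinsert under the new name. A self-contained toy version (the mapping below is illustrative, not the real BiT rename table):

import re

def rename_key(name: str) -> str:
    # Illustrative renames only; the real mapping lives in the script above.
    name = name.replace("stem.conv", "embedder.convolution")
    name = re.sub(r"\bblocks\b", "layers", name)
    return name

state_dict = {"stem.conv.weight": 0, "blocks.0.attn.weight": 1}
converted = {rename_key(k): state_dict.pop(k) for k in list(state_dict)}
print(converted)  # {'embedder.convolution.weight': 0, 'layers.0.attn.weight': 1}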
78
0
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCamelCase__ : Union[str, Any] = 1_6 lowerCamelCase__ : List[Any] = 3_2 def UpperCamelCase ( lowercase_ , lowercase_ = 16 ) -> List[str]: '''simple docstring''' lowercase__ : Tuple = AutoTokenizer.from_pretrained("""bert-base-cased""" ) lowercase__ : Optional[int] = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(lowercase_ ): # max_length=None => use the model max length (it's actually the default) lowercase__ : Optional[Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase_ , max_length=lowercase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowercase__ : List[str] = datasets.map( lowercase_ , batched=lowercase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase__ : Union[str, Any] = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(lowercase_ ): # On TPU it's best to pad everything to the same length or training will be very slow. lowercase__ : Optional[int] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowercase__ : Dict = 16 elif accelerator.mixed_precision != "no": lowercase__ : str = 8 else: lowercase__ : Dict = None return tokenizer.pad( lowercase_ , padding="""longest""" , max_length=lowercase_ , pad_to_multiple_of=lowercase_ , return_tensors="""pt""" , ) # Instantiate dataloaders. 
lowercase__ : int = DataLoader( tokenized_datasets["""train"""] , shuffle=lowercase_ , collate_fn=lowercase_ , batch_size=lowercase_ ) lowercase__ : Optional[int] = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowercase_ , collate_fn=lowercase_ , batch_size=lowercase_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders lowerCamelCase__ : str = mocked_dataloaders # noqa: F811 def UpperCamelCase ( lowercase_ , lowercase_ ) -> Optional[Any]: '''simple docstring''' if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowercase_ ) == "1": lowercase__ : List[str] = 2 # New Code # lowercase__ : List[str] = int(args.gradient_accumulation_steps ) # Initialize accelerator lowercase__ : Optional[int] = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase_ ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( """Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase__ : Dict = config["""lr"""] lowercase__ : Dict = int(config["""num_epochs"""] ) lowercase__ : int = int(config["""seed"""] ) lowercase__ : int = int(config["""batch_size"""] ) lowercase__ : str = evaluate.load("""glue""" , """mrpc""" ) set_seed(lowercase_ ) lowercase__ , lowercase__ : Dict = get_dataloaders(lowercase_ , lowercase_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase__ : int = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowercase_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowercase__ : str = model.to(accelerator.device ) # Instantiate optimizer lowercase__ : Any = AdamW(params=model.parameters() , lr=lowercase_ ) # Instantiate scheduler lowercase__ : Optional[int] = get_linear_schedule_with_warmup( optimizer=lowercase_ , num_warmup_steps=1_00 , num_training_steps=(len(lowercase_ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = accelerator.prepare( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) # Now we train the model for epoch in range(lowercase_ ): model.train() for step, batch in enumerate(lowercase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. 
with accelerator.accumulate(lowercase_ ): lowercase__ : List[str] = model(**lowercase_ ) lowercase__ : Any = output.loss accelerator.backward(lowercase_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowercase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowercase__ : List[str] = model(**lowercase_ ) lowercase__ : Optional[Any] = outputs.logits.argmax(dim=-1 ) lowercase__ , lowercase__ : Any = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=lowercase_ , references=lowercase_ , ) lowercase__ : Optional[int] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'epoch {epoch}:' , lowercase_ ) def UpperCamelCase ( ) -> str: '''simple docstring''' lowercase__ : int = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=lowercase_ , default=lowercase_ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) # New Code # parser.add_argument( """--gradient_accumulation_steps""" , type=lowercase_ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) lowercase__ : str = parser.parse_args() lowercase__ : List[Any] = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(lowercase_ , lowercase_ ) if __name__ == "__main__": main()
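For contrast with the `accelerator.accumulate` context manager used above, this is the manual PyTorch equivalent: scale each micro-batch loss and step the optimizer only every `accum_steps` batches (a sketch, not Accelerate's internals):

import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
accum_steps = 4

for step in range(8):
    x = torch.randn(16, 4)
    loss = model(x).sum() / accum_steps  # scale so the accumulated gradient averages
    loss.backward()                      # gradients accumulate across backward() calls
    if (step + 1) % accum_steps == 0:
        optimizer.step()
        optimizer.zero_grad()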
12
'''simple docstring''' import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class __A ( unittest.TestCase ): def _lowercase (self : List[str] ): UpperCAmelCase_ = 0 def _lowercase (self : Tuple ): UpperCAmelCase_ = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" ) self.assertIsInstance(__a , __a ) def _lowercase (self : str ): with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json" UpperCAmelCase_ = Path(__a ) / "config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , ) json.dump({"model_type": "clip"} , open(__a , "w" ) ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def _lowercase (self : Dict ): # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json" UpperCAmelCase_ = Path(__a ) / "config.json" json.dump( {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , ) json.dump({"model_type": "clip"} , open(__a , "w" ) ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def _lowercase (self : List[str] ): with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = CLIPConfig() # Create a dummy config file with image_proceesor_type UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json" UpperCAmelCase_ = Path(__a ) / "config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , ) json.dump({"model_type": "clip"} , open(__a , "w" ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ).to_dict() config_dict.pop("image_processor_type" ) UpperCAmelCase_ = CLIPImageProcessor(**__a ) # save in new folder model_config.save_pretrained(__a ) config.save_pretrained(__a ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ) # make sure private variable is not incorrectly saved UpperCAmelCase_ = json.loads(config.to_json_string() ) self.assertTrue("_processor_class" not in dict_as_saved ) self.assertIsInstance(__a , __a ) def _lowercase (self : int ): with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def _lowercase (self : Tuple ): with self.assertRaisesRegex( __a , "clip-base is not a local folder and is not a valid model identifier" ): UpperCAmelCase_ = AutoImageProcessor.from_pretrained("clip-base" ) def _lowercase (self : Optional[int] ): with self.assertRaisesRegex( __a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): UpperCAmelCase_ = 
AutoImageProcessor.from_pretrained(__a , revision="aaaaaa" ) def _lowercase (self : Union[str, Any] ): with self.assertRaisesRegex( __a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ): UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" ) def _lowercase (self : List[Any] ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__a ): UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" ) # If remote code is disabled, we can't load this config. with self.assertRaises(__a ): UpperCAmelCase_ = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) # Test image processor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__a ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a , trust_remote_code=__a ) self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" ) def _lowercase (self : Optional[int] ): try: AutoConfig.register("custom" , __a ) AutoImageProcessor.register(__a , __a ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__a ): AutoImageProcessor.register(__a , __a ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json" UpperCAmelCase_ = Path(__a ) / "config.json" json.dump( {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , ) json.dump({"model_type": "clip"} , open(__a , "w" ) ) UpperCAmelCase_ = CustomImageProcessor.from_pretrained(__a ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__a ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def _lowercase (self : Optional[int] ): class __A ( UpperCamelCase__ ): a__ : str = True try: AutoConfig.register("custom" , __a ) AutoImageProcessor.register(__a , __a ) # If remote code is not set, the default is to use local UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. 
UpperCAmelCase_ = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub UpperCAmelCase_ = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) self.assertTrue(not hasattr(__a , "is_local" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
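The core round-trip these tests exercise can be reproduced in a few lines with the public API; a minimal sketch:

import tempfile

from transformers import AutoImageProcessor, CLIPImageProcessor

proc = CLIPImageProcessor()
with tempfile.TemporaryDirectory() as tmp:
    proc.save_pretrained(tmp)
    reloaded = AutoImageProcessor.from_pretrained(tmp)
assert isinstance(reloaded, CLIPImageProcessor)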
78
0
'''simple docstring'''


def UpperCAmelCase__(matrix: list[list[int | float]]) -> int:
    """Compute the rank of a matrix in place via Gaussian elimination."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    # A while loop is used so the current row can be re-examined after a swap
    # or column replacement (a Python for-loop variable cannot be rewound).
    row = 0
    while row < rank:
        # Check if the diagonal element is non-zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
            row += 1
        else:
            # Find a non-zero element below to swap rows with
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                # No pivot in this column: drop it, pull in the last column,
                # and stay on the same row
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
    return rank


if __name__ == "__main__":
    import doctest

    doctest.testmod()
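Two quick checks of the rank routine above (note it mutates its argument in place):

print(UpperCAmelCase__([[1.0, 2.0], [2.0, 4.0]]))  # 1  (second row is 2x the first)
print(UpperCAmelCase__([[1.0, 0.0], [0.0, 1.0]]))  # 2  (identity, full rank)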
13
'''simple docstring''' import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_: Tuple =False, False, False @dataclass class __A : a__ : Optional[int] = None a__ : bool = True a__ : bool = True a__ : Optional[str] = None # Automatically constructed a__ : ClassVar[str] = "dict" a__ : ClassVar[Any] = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} ) a__ : str = field(default="""Audio""" , init=UpperCamelCase__ , repr=UpperCamelCase__ ) def __call__(self : Optional[Any] ): return self.pa_type def _lowercase (self : str , __a : Union[str, bytes, dict] ): try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err if isinstance(__a , __a ): return {"bytes": None, "path": value} elif isinstance(__a , __a ): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes UpperCAmelCase_ = BytesIO() sf.write(__a , value["array"] , value["sampling_rate"] , format="wav" ) return {"bytes": buffer.getvalue(), "path": None} elif value.get("path" ) is not None and os.path.isfile(value["path"] ): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith("pcm" ): # "PCM" only has raw audio bytes if value.get("sampling_rate" ) is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" ) if value.get("bytes" ): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) UpperCAmelCase_ = np.frombuffer(value["bytes"] , dtype=np.intaa ).astype(np.floataa ) / 32767 else: UpperCAmelCase_ = np.memmap(value["path"] , dtype="h" , mode="r" ).astype(np.floataa ) / 32767 UpperCAmelCase_ = BytesIO(bytes() ) sf.write(__a , __a , value["sampling_rate"] , format="wav" ) return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get("path" )} elif value.get("bytes" ) is not None or value.get("path" ) is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get("bytes" ), "path": value.get("path" )} else: raise ValueError( f"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" ) def _lowercase (self : Dict , __a : dict , __a : Optional[Dict[str, Union[str, bool, None]]] = None ): if not self.decode: raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." ) UpperCAmelCase_ , UpperCAmelCase_ = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None) if path is None and file is None: raise ValueError(f"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" ) try: import librosa import soundfile as sf except ImportError as err: raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." 
) from err UpperCAmelCase_ = xsplitext(__a )[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, " "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, " "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " ) if file is None: UpperCAmelCase_ = token_per_repo_id or {} UpperCAmelCase_ = path.split("::" )[-1] try: UpperCAmelCase_ = string_to_dict(__a , config.HUB_DATASETS_URL )["repo_id"] UpperCAmelCase_ = token_per_repo_id[repo_id] except (ValueError, KeyError): UpperCAmelCase_ = None with xopen(__a , "rb" , use_auth_token=__a ) as f: UpperCAmelCase_ , UpperCAmelCase_ = sf.read(__a ) else: UpperCAmelCase_ , UpperCAmelCase_ = sf.read(__a ) UpperCAmelCase_ = array.T if self.mono: UpperCAmelCase_ = librosa.to_mono(__a ) if self.sampling_rate and self.sampling_rate != sampling_rate: UpperCAmelCase_ = librosa.resample(__a , orig_sr=__a , target_sr=self.sampling_rate ) UpperCAmelCase_ = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def _lowercase (self : Dict ): from .features import Value if self.decode: raise ValueError("Cannot flatten a decoded Audio feature." ) return { "bytes": Value("binary" ), "path": Value("string" ), } def _lowercase (self : Optional[Any] , __a : Union[pa.StringArray, pa.StructArray] ): if pa.types.is_string(storage.type ): UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.binary() ) UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.string() ) UpperCAmelCase_ = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ): UpperCAmelCase_ = pa.array([Audio().encode_example(__a ) if x is not None else None for x in storage.to_pylist()] ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("bytes" ) >= 0: UpperCAmelCase_ = storage.field("bytes" ) else: UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.binary() ) if storage.type.get_field_index("path" ) >= 0: UpperCAmelCase_ = storage.field("path" ) else: UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.string() ) UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() ) return array_cast(__a , self.pa_type ) def _lowercase (self : Dict , __a : pa.StructArray ): @no_op_if_value_is_null def path_to_bytes(__a : Tuple ): with xopen(__a , "rb" ) as f: UpperCAmelCase_ = f.read() return bytes_ UpperCAmelCase_ = pa.array( [ (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) UpperCAmelCase_ = pa.array( [os.path.basename(__a ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , ) UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() ) return array_cast(__a , self.pa_type )
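The array-to-WAV-bytes branch of `encode_example` above boils down to a single `soundfile` call into an in-memory buffer; a standalone sketch:

from io import BytesIO

import numpy as np
import soundfile as sf

array = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
buffer = BytesIO()
sf.write(buffer, array, 16_000, format="wav")
wav_bytes = buffer.getvalue()  # what the feature stores under "bytes"
print(len(wav_bytes) > 0)  # True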
78
0
import argparse import os import re import numpy as np import PIL import torch from timm import create_model from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator def __UpperCAmelCase ( __a : Any ) -> str: """simple docstring""" _a : List[str] = fname.split(os.path.sep )[-1] return re.search(R'''^(.*)_\d+\.jpg$''' ,__a ).groups()[0] class UpperCAmelCase_ ( __lowercase ): """simple docstring""" def __init__( self , _a , _a=None , _a=None ) -> Dict: _a : Optional[int] = file_names _a : List[str] = image_transform _a : List[Any] = label_to_id def __len__( self ) -> Any: return len(self.file_names ) def __getitem__( self , _a ) -> Tuple: _a : Union[str, Any] = self.file_names[idx] _a : Tuple = PIL.Image.open(_a ) _a : Dict = raw_image.convert('''RGB''' ) if self.image_transform is not None: _a : Tuple = self.image_transform(_a ) _a : Dict = extract_label(_a ) if self.label_to_id is not None: _a : Any = self.label_to_id[label] return {"image": image, "label": label} def __UpperCAmelCase ( __a : str ,__a : int ) -> Union[str, Any]: """simple docstring""" if args.with_tracking: _a : Tuple = Accelerator( cpu=args.cpu ,mixed_precision=args.mixed_precision ,log_with='''all''' ,project_dir=args.project_dir ) else: _a : int = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _a : List[Any] = config['''lr'''] _a : Optional[Any] = int(config['''num_epochs'''] ) _a : List[Any] = int(config['''seed'''] ) _a : Optional[Any] = int(config['''batch_size'''] ) _a : str = config['''image_size'''] if not isinstance(__a ,(list, tuple) ): _a : Optional[int] = (image_size, image_size) # Parse out whether we are saving every epoch or after a certain number of batches if hasattr(args.checkpointing_steps ,'''isdigit''' ): if args.checkpointing_steps == "epoch": _a : List[str] = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): _a : int = int(args.checkpointing_steps ) else: raise ValueError( F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" ) else: _a : Optional[int] = None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: _a : List[str] = os.path.split(__a )[-1].split('''.''' )[0] accelerator.init_trackers(__a ,__a ) # Grab all the image filenames _a : Dict = [os.path.join(args.data_dir ,__a ) for fname in os.listdir(args.data_dir ) if fname.endswith('''.jpg''' )] # Build the label correspondences _a : Dict = [extract_label(__a ) for fname in file_names] _a : str = list(set(__a ) ) id_to_label.sort() _a : List[str] = {lbl: i for i, lbl in enumerate(__a )} # Set the seed before splitting the data. 
np.random.seed(__a ) torch.manual_seed(__a ) torch.cuda.manual_seed_all(__a ) # Split our filenames between train and validation _a : str = np.random.permutation(len(__a ) ) _a : List[Any] = int(0.8 * len(__a ) ) _a : Dict = random_perm[:cut] _a : Any = random_perm[cut:] # For training we use a simple RandomResizedCrop _a : Optional[int] = Compose([RandomResizedCrop(__a ,scale=(0.5, 1.0) ), ToTensor()] ) _a : Any = PetsDataset( [file_names[i] for i in train_split] ,image_transform=__a ,label_to_id=__a ) # For evaluation, we use a deterministic Resize _a : Dict = Compose([Resize(__a ), ToTensor()] ) _a : Optional[Any] = PetsDataset([file_names[i] for i in eval_split] ,image_transform=__a ,label_to_id=__a ) # Instantiate dataloaders. _a : Tuple = DataLoader(__a ,shuffle=__a ,batch_size=__a ,num_workers=4 ) _a : Optional[Any] = DataLoader(__a ,shuffle=__a ,batch_size=__a ,num_workers=4 ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _a : str = create_model('''resnet50d''' ,pretrained=__a ,num_classes=len(__a ) ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _a : int = model.to(accelerator.device ) # Freezing the base model for param in model.parameters(): _a : Optional[int] = False for param in model.get_classifier().parameters(): _a : Any = True # We normalize the batches of images to be a bit faster. _a : int = torch.tensor(model.default_cfg['''mean'''] )[None, :, None, None].to(accelerator.device ) _a : Dict = torch.tensor(model.default_cfg['''std'''] )[None, :, None, None].to(accelerator.device ) # Instantiate optimizer _a : Union[str, Any] = torch.optim.Adam(params=model.parameters() ,lr=lr / 25 ) # Instantiate learning rate scheduler _a : Optional[int] = OneCycleLR(optimizer=__a ,max_lr=__a ,epochs=__a ,steps_per_epoch=len(__a ) ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
_a , _a , _a , _a , _a : Any = accelerator.prepare( __a ,__a ,__a ,__a ,__a ) # We need to keep track of how many total steps we have iterated over _a : Optional[int] = 0 # We also need to keep track of the starting epoch so files are named properly _a : List[str] = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" ) accelerator.load_state(args.resume_from_checkpoint ) _a : Tuple = os.path.basename(args.resume_from_checkpoint ) else: # Get the most recent checkpoint _a : Optional[Any] = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()] dirs.sort(key=os.path.getctime ) _a : List[Any] = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` _a : Optional[int] = os.path.splitext(__a )[0] if "epoch" in training_difference: _a : List[Any] = int(training_difference.replace('''epoch_''' ,'''''' ) ) + 1 _a : str = None else: _a : str = int(training_difference.replace('''step_''' ,'''''' ) ) _a : Any = resume_step // len(__a ) resume_step -= starting_epoch * len(__a ) # Now we train the model for epoch in range(__a ,__a ): model.train() if args.with_tracking: _a : List[Any] = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step _a : List[Any] = accelerator.skip_first_batches(__a ,__a ) overall_step += resume_step else: # After the first iteration though, we need to go back to the original dataloader _a : str = train_dataloader for batch in active_dataloader: # We could avoid this line since we set the accelerator with `device_placement=True`. _a : Optional[Any] = {k: v.to(accelerator.device ) for k, v in batch.items()} _a : Dict = (batch['''image'''] - mean) / std _a : Optional[int] = model(__a ) _a : Union[str, Any] = torch.nn.functional.cross_entropy(__a ,batch['''label'''] ) # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(__a ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 if isinstance(__a ,__a ): _a : List[str] = F"""step_{overall_step}""" if overall_step % checkpointing_steps == 0: if args.output_dir is not None: _a : Dict = os.path.join(args.output_dir ,__a ) accelerator.save_state(__a ) model.eval() _a : Union[str, Any] = 0 _a : List[str] = 0 for step, batch in enumerate(__a ): # We could avoid this line since we set the accelerator with `device_placement=True`. _a : str = {k: v.to(accelerator.device ) for k, v in batch.items()} _a : Tuple = (batch['''image'''] - mean) / std with torch.no_grad(): _a : int = model(__a ) _a : Tuple = outputs.argmax(dim=-1 ) _a , _a : Optional[int] = accelerator.gather_for_metrics((predictions, batch['''label''']) ) _a : Union[str, Any] = predictions == references num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() _a : Any = accurate.item() / num_elems # Use accelerator.print to print only on the main process. 
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" ) if args.with_tracking: accelerator.log( { '''accuracy''': 100 * eval_metric, '''train_loss''': total_loss.item() / len(__a ), '''epoch''': epoch, } ,step=__a ,) if checkpointing_steps == "epoch": _a : str = F"""epoch_{epoch}""" if args.output_dir is not None: _a : Optional[int] = os.path.join(args.output_dir ,__a ) accelerator.save_state(__a ) if args.with_tracking: accelerator.end_training() def __UpperCAmelCase ( ) -> List[Any]: """simple docstring""" _a : List[Any] = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument('''--data_dir''' ,required=__a ,help='''The data folder on disk.''' ) parser.add_argument('''--fp16''' ,action='''store_true''' ,help='''If passed, will use FP16 training.''' ) parser.add_argument( '''--mixed_precision''' ,type=__a ,default=__a ,choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] ,help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' ,) parser.add_argument('''--cpu''' ,action='''store_true''' ,help='''If passed, will train on the CPU.''' ) parser.add_argument( '''--checkpointing_steps''' ,type=__a ,default=__a ,help='''Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.''' ,) parser.add_argument( '''--output_dir''' ,type=__a ,default='''.''' ,help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' ,) parser.add_argument( '''--resume_from_checkpoint''' ,type=__a ,default=__a ,help='''If the training should continue from a checkpoint folder.''' ,) parser.add_argument( '''--with_tracking''' ,action='''store_true''' ,help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' ,) parser.add_argument( '''--project_dir''' ,type=__a ,default='''logs''' ,help='''Location on where to store experiment tracking logs` and relevent project information''' ,) _a : str = parser.parse_args() _a : Optional[Any] = {'''lr''': 3E-2, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 64, '''image_size''': 224} training_function(__a ,__a ) if __name__ == "__main__": main()
14
'''simple docstring''' import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Optional[int] ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg" UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ).convert("RGB" ) UpperCAmelCase_ = transforms.Compose( [ transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ), ] ) UpperCAmelCase_ = transform(snake_case_ ).unsqueeze(0 ).to(snake_case_ ) return image def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' if "visual_encoder" in key: UpperCAmelCase_ = re.sub("visual_encoder*" , "vision_model.encoder" , snake_case_ ) if "blocks" in key: UpperCAmelCase_ = re.sub(R"blocks" , "layers" , snake_case_ ) if "attn" in key: UpperCAmelCase_ = re.sub(R"attn" , "self_attn" , snake_case_ ) if "norm1" in key: UpperCAmelCase_ = re.sub(R"norm1" , "layer_norm1" , snake_case_ ) if "norm2" in key: UpperCAmelCase_ = re.sub(R"norm2" , "layer_norm2" , snake_case_ ) if "encoder.norm" in key: UpperCAmelCase_ = re.sub(R"encoder.norm" , "post_layernorm" , snake_case_ ) if "encoder.patch_embed.proj" in key: UpperCAmelCase_ = re.sub(R"encoder.patch_embed.proj" , "embeddings.patch_embedding" , snake_case_ ) if "encoder.pos_embed" in key: UpperCAmelCase_ = re.sub(R"encoder.pos_embed" , "embeddings.position_embedding" , snake_case_ ) if "encoder.cls_token" in key: UpperCAmelCase_ = re.sub(R"encoder.cls_token" , "embeddings.class_embedding" , snake_case_ ) if "self_attn" in key: UpperCAmelCase_ = re.sub(R"self_attn.proj" , "self_attn.projection" , snake_case_ ) return key @torch.no_grad() def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : Any=None ) -> Union[str, Any]: '''simple docstring''' if config_path is not None: UpperCAmelCase_ = BlipConfig.from_pretrained(snake_case_ ) else: UpperCAmelCase_ = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} ) UpperCAmelCase_ = BlipForConditionalGeneration(snake_case_ ).eval() UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth" UpperCAmelCase_ = blip_decoder(pretrained=snake_case_ , image_size=3_84 , vit="base" ) UpperCAmelCase_ = pt_model.eval() UpperCAmelCase_ = pt_model.state_dict() for key in modified_state_dict.copy(): UpperCAmelCase_ = modified_state_dict.pop(snake_case_ ) UpperCAmelCase_ = rename_key(snake_case_ ) UpperCAmelCase_ = value hf_model.load_state_dict(snake_case_ ) UpperCAmelCase_ = 3_84 UpperCAmelCase_ = load_demo_image(image_size=snake_case_ , device="cpu" ) UpperCAmelCase_ = BertTokenizer.from_pretrained("bert-base-uncased" ) UpperCAmelCase_ = tokenizer(["a picture of"] ).input_ids UpperCAmelCase_ = hf_model.generate(snake_case_ , snake_case_ ) assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 
20_14, 38_99, 1_02] UpperCAmelCase_ = hf_model.generate(snake_case_ ) assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02] if pytorch_dump_folder_path is not None: hf_model.save_pretrained(snake_case_ ) # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth' UpperCAmelCase_ = ( "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth" ) UpperCAmelCase_ = blip_vqa(pretrained=snake_case_ , image_size=snake_case_ , vit="base" ) vqa_model.eval() UpperCAmelCase_ = vqa_model.state_dict() for key in modified_state_dict.copy(): UpperCAmelCase_ = modified_state_dict.pop(snake_case_ ) UpperCAmelCase_ = rename_key(snake_case_ ) UpperCAmelCase_ = value UpperCAmelCase_ = BlipForQuestionAnswering(snake_case_ ) hf_vqa_model.load_state_dict(snake_case_ ) UpperCAmelCase_ = ["How many dogs are in this image?"] UpperCAmelCase_ = tokenizer(snake_case_ , return_tensors="pt" ).input_ids UpperCAmelCase_ = hf_vqa_model.generate(snake_case_ , snake_case_ ) print(tokenizer.decode(answer[0] ) ) assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]" if pytorch_dump_folder_path is not None: hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa" ) UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth" UpperCAmelCase_ = blip_itm(pretrained=snake_case_ , image_size=snake_case_ , vit="base" ) itm_model.eval() UpperCAmelCase_ = itm_model.state_dict() for key in modified_state_dict.copy(): UpperCAmelCase_ = modified_state_dict.pop(snake_case_ ) UpperCAmelCase_ = rename_key(snake_case_ ) UpperCAmelCase_ = value UpperCAmelCase_ = BlipForImageTextRetrieval(snake_case_ ) UpperCAmelCase_ = ["A picture of a woman with a dog sitting in a beach"] UpperCAmelCase_ = tokenizer( snake_case_ , return_tensors="pt" , padding="max_length" , truncation=snake_case_ , max_length=35 , ).input_ids hf_itm_model.load_state_dict(snake_case_ ) hf_itm_model.eval() UpperCAmelCase_ = hf_itm_model(snake_case_ , snake_case_ , use_itm_head=snake_case_ ) UpperCAmelCase_ = hf_itm_model(snake_case_ , snake_case_ , use_itm_head=snake_case_ ) assert out[0].item() == 0.2110_6874_9427_7954 assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127 if pytorch_dump_folder_path is not None: hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_: Optional[Any] =argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') SCREAMING_SNAKE_CASE_: int =parser.parse_args() convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
78
0
from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging A : Tuple = logging.get_logger(__name__) class A ( UpperCAmelCase__ ): '''simple docstring''' A__ = ['''pixel_values'''] def __init__(self : Optional[int] , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[int, float] = 1 / 255 , _UpperCAmelCase : bool = True , _UpperCAmelCase : int = 8 , **_UpperCAmelCase : int , ) -> None: """simple docstring""" super().__init__(**_UpperCAmelCase ) lowercase__ = do_rescale lowercase__ = rescale_factor lowercase__ = do_pad lowercase__ = pad_size def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Tuple ) -> np.ndarray: """simple docstring""" return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None ) -> int: """simple docstring""" lowercase__ , lowercase__ = get_image_size(_UpperCAmelCase ) lowercase__ = (old_height // size + 1) * size - old_height lowercase__ = (old_width // size + 1) * size - old_width return pad(_UpperCAmelCase , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=_UpperCAmelCase ) def lowerCamelCase__ (self : int , _UpperCAmelCase : ImageInput , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[float] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **_UpperCAmelCase : List[str] , ) -> int: """simple docstring""" lowercase__ = do_rescale if do_rescale is not None else self.do_rescale lowercase__ = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase__ = do_pad if do_pad is not None else self.do_pad lowercase__ = pad_size if pad_size is not None else self.pad_size lowercase__ = make_list_of_images(_UpperCAmelCase ) if not valid_images(_UpperCAmelCase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) # All transformations expect numpy arrays. lowercase__ = [to_numpy_array(_UpperCAmelCase ) for image in images] if do_rescale: lowercase__ = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images] if do_pad: lowercase__ = [self.pad(_UpperCAmelCase , size=_UpperCAmelCase ) for image in images] lowercase__ = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images] lowercase__ = {"""pixel_values""": images} return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
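The `pad` method above grows an image to the next multiple of `size` with symmetric padding; note that `(old // size + 1) * size - old` always adds at least one row and column, even when the input is already a multiple. A numpy-only sketch of the same arithmetic:

import numpy as np

def pad_to_multiple(image: np.ndarray, size: int = 8) -> np.ndarray:
    h, w = image.shape[:2]
    pad_h = (h // size + 1) * size - h  # always >= 1, matching the code above
    pad_w = (w // size + 1) * size - w
    return np.pad(image, ((0, pad_h), (0, pad_w)), mode="symmetric")

out = pad_to_multiple(np.ones((10, 13)), size=8)
print(out.shape)  # (16, 16)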
15
'''simple docstring''' import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Union[str, Any]=0.999 , snake_case_ : Tuple="cosine" , ) -> Optional[Any]: '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(snake_case_ : Optional[int] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(snake_case_ : Optional[Any] ): return math.exp(t * -12.0 ) else: raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) UpperCAmelCase_ = [] for i in range(snake_case_ ): UpperCAmelCase_ = i / num_diffusion_timesteps UpperCAmelCase_ = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(snake_case_ ) / alpha_bar_fn(snake_case_ ) , snake_case_ ) ) return torch.tensor(snake_case_ , dtype=torch.floataa ) class __A ( UpperCamelCase__ , UpperCamelCase__ ): a__ : Tuple = [e.name for e in KarrasDiffusionSchedulers] a__ : Optional[Any] = 2 @register_to_config def __init__(self : Union[str, Any] , __a : int = 1000 , __a : float = 0.0_00_85 , __a : float = 0.0_12 , __a : str = "linear" , __a : Optional[Union[np.ndarray, List[float]]] = None , __a : str = "epsilon" , __a : Optional[bool] = False , __a : Optional[bool] = False , __a : float = 1.0 , __a : str = "linspace" , __a : int = 0 , ): if trained_betas is not None: UpperCAmelCase_ = torch.tensor(__a , dtype=torch.floataa ) elif beta_schedule == "linear": UpperCAmelCase_ = torch.linspace(__a , __a , __a , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. UpperCAmelCase_ = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , __a , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule UpperCAmelCase_ = betas_for_alpha_bar(__a , alpha_transform_type="cosine" ) elif beta_schedule == "exp": UpperCAmelCase_ = betas_for_alpha_bar(__a , alpha_transform_type="exp" ) else: raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" ) UpperCAmelCase_ = 1.0 - self.betas UpperCAmelCase_ = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(__a , __a , __a ) UpperCAmelCase_ = use_karras_sigmas def _lowercase (self : Optional[Any] , __a : Union[str, Any] , __a : Tuple=None ): if schedule_timesteps is None: UpperCAmelCase_ = self.timesteps UpperCAmelCase_ = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: UpperCAmelCase_ = 1 if len(__a ) > 1 else 0 else: UpperCAmelCase_ = timestep.cpu().item() if torch.is_tensor(__a ) else timestep UpperCAmelCase_ = self._index_counter[timestep_int] return indices[pos].item() @property def _lowercase (self : List[Any] ): # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def _lowercase (self : Optional[Any] , __a : torch.FloatTensor , __a : Union[float, torch.FloatTensor] , ): UpperCAmelCase_ = self.index_for_timestep(__a ) UpperCAmelCase_ = self.sigmas[step_index] UpperCAmelCase_ = sample / ((sigma**2 + 1) ** 0.5) return sample def _lowercase (self : Any , __a : int , __a : Union[str, torch.device] = None , __a : Optional[int] = None , ): UpperCAmelCase_ = num_inference_steps UpperCAmelCase_ = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": UpperCAmelCase_ = np.linspace(0 , num_train_timesteps - 1 , __a , dtype=__a )[::-1].copy() elif self.config.timestep_spacing == "leading": UpperCAmelCase_ = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 UpperCAmelCase_ = (np.arange(0 , __a ) * step_ratio).round()[::-1].copy().astype(__a ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": UpperCAmelCase_ = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 UpperCAmelCase_ = (np.arange(__a , 0 , -step_ratio )).round().copy().astype(__a ) timesteps -= 1 else: raise ValueError( f"""{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" ) UpperCAmelCase_ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) UpperCAmelCase_ = np.log(__a ) UpperCAmelCase_ = np.interp(__a , np.arange(0 , len(__a ) ) , __a ) if self.config.use_karras_sigmas: UpperCAmelCase_ = self._convert_to_karras(in_sigmas=__a , num_inference_steps=self.num_inference_steps ) UpperCAmelCase_ = np.array([self._sigma_to_t(__a , __a ) for sigma in sigmas] ) UpperCAmelCase_ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) UpperCAmelCase_ = torch.from_numpy(__a ).to(device=__a ) UpperCAmelCase_ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] ) UpperCAmelCase_ = torch.from_numpy(__a ) UpperCAmelCase_ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] ) if str(__a ).startswith("mps" ): # mps does not support float64 UpperCAmelCase_ = timesteps.to(__a , dtype=torch.floataa ) else: UpperCAmelCase_ = timesteps.to(device=__a ) # empty dt and derivative UpperCAmelCase_ = None UpperCAmelCase_ = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter UpperCAmelCase_ = defaultdict(__a ) def _lowercase (self : int , __a : Optional[Any] , __a : List[str] ): # get log sigma UpperCAmelCase_ = np.log(__a ) # get distribution UpperCAmelCase_ = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range UpperCAmelCase_ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 ) UpperCAmelCase_ = low_idx + 1 UpperCAmelCase_ = log_sigmas[low_idx] UpperCAmelCase_ = log_sigmas[high_idx] # interpolate sigmas UpperCAmelCase_ = (low - log_sigma) / (low - high) UpperCAmelCase_ = np.clip(__a , 0 , 1 ) # transform interpolation to time range UpperCAmelCase_ = (1 - w) * low_idx + w * high_idx UpperCAmelCase_ = t.reshape(sigma.shape ) return t def _lowercase (self : Dict , __a : torch.FloatTensor , __a : Optional[int] ): UpperCAmelCase_ = in_sigmas[-1].item() UpperCAmelCase_ = in_sigmas[0].item() UpperCAmelCase_ = 7.0 # 7.0 is the value used in the paper UpperCAmelCase_ = np.linspace(0 , 1 , __a ) UpperCAmelCase_ = sigma_min ** (1 / rho) UpperCAmelCase_ = sigma_max ** (1 / rho) UpperCAmelCase_ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def _lowercase (self : List[str] ): return self.dt is None def _lowercase (self : List[Any] , __a : Union[torch.FloatTensor, np.ndarray] , __a : Union[float, torch.FloatTensor] , __a : Union[torch.FloatTensor, np.ndarray] , __a : bool = True , ): UpperCAmelCase_ = self.index_for_timestep(__a ) # advance index counter by 1 UpperCAmelCase_ = timestep.cpu().item() if torch.is_tensor(__a ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: UpperCAmelCase_ = self.sigmas[step_index] UpperCAmelCase_ = self.sigmas[step_index + 1] else: # 2nd order / Heun's method UpperCAmelCase_ = self.sigmas[step_index - 1] UpperCAmelCase_ = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API UpperCAmelCase_ = 0 UpperCAmelCase_ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": UpperCAmelCase_ = sigma_hat if self.state_in_first_order else sigma_next UpperCAmelCase_ = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": UpperCAmelCase_ = sigma_hat if self.state_in_first_order else sigma_next UpperCAmelCase_ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": UpperCAmelCase_ = model_output else: raise ValueError( f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" ) if self.config.clip_sample: UpperCAmelCase_ = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order UpperCAmelCase_ = (sample - pred_original_sample) / sigma_hat # 3. delta timestep UpperCAmelCase_ = sigma_next - sigma_hat # store for 2nd order step UpperCAmelCase_ = derivative UpperCAmelCase_ = dt UpperCAmelCase_ = sample else: # 2. 2nd order / Heun's method UpperCAmelCase_ = (sample - pred_original_sample) / sigma_next UpperCAmelCase_ = (self.prev_derivative + derivative) / 2 # 3. take prev timestep & sample UpperCAmelCase_ = self.dt UpperCAmelCase_ = self.sample # free dt and derivative # Note, this puts the scheduler in "first order mode" UpperCAmelCase_ = None UpperCAmelCase_ = None UpperCAmelCase_ = None UpperCAmelCase_ = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=__a ) def _lowercase (self : Any , __a : torch.FloatTensor , __a : torch.FloatTensor , __a : torch.FloatTensor , ): # Make sure sigmas and timesteps have the same device and dtype as original_samples UpperCAmelCase_ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(__a ): # mps does not support float64 UpperCAmelCase_ = self.timesteps.to(original_samples.device , dtype=torch.floataa ) UpperCAmelCase_ = timesteps.to(original_samples.device , dtype=torch.floataa ) else: UpperCAmelCase_ = self.timesteps.to(original_samples.device ) UpperCAmelCase_ = timesteps.to(original_samples.device ) UpperCAmelCase_ = [self.index_for_timestep(__a , __a ) for t in timesteps] UpperCAmelCase_ = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): UpperCAmelCase_ = sigma.unsqueeze(-1 ) UpperCAmelCase_ = original_samples + noise * sigma return noisy_samples def __len__(self : str ): return self.config.num_train_timesteps
78
0
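# A minimal standalone sketch of the pad-to-multiple rule used by the image
# processor in the row above; `pad_to_multiple` is an illustrative name, not
# one from the row. Note that the rule always pads, adding a full extra block
# of `size` rows/columns even when a dimension is already a multiple of `size`.
import numpy as np


def pad_to_multiple(image: np.ndarray, size: int = 8) -> np.ndarray:
    # Symmetrically pad an (H, W, C) array so H and W become multiples of `size`.
    old_height, old_width = image.shape[:2]
    pad_height = (old_height // size + 1) * size - old_height
    pad_width = (old_width // size + 1) * size - old_width
    return np.pad(image, ((0, pad_height), (0, pad_width), (0, 0)), mode="symmetric")


print(pad_to_multiple(np.zeros((17, 21, 3))).shape)  # (24, 24, 3)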
from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig __A : Dict = logging.get_logger(__name__) # General docstring __A : List[Any] = 'MobileNetV1Config' # Base docstring __A : List[str] = 'google/mobilenet_v1_1.0_224' __A : Any = [1, 1_0_2_4, 7, 7] # Image classification docstring __A : List[str] = 'google/mobilenet_v1_1.0_224' __A : str = 'tabby, tabby cat' __A : int = [ 'google/mobilenet_v1_1.0_224', 'google/mobilenet_v1_0.75_192', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def __a ( A__ : Dict , A__ : Union[str, Any] , A__ : Optional[int]=None ): SCREAMING_SNAKE_CASE = {} if isinstance(A__ , A__ ): SCREAMING_SNAKE_CASE = model.mobilenet_va else: SCREAMING_SNAKE_CASE = model SCREAMING_SNAKE_CASE = "MobilenetV1/Conv2d_0/" SCREAMING_SNAKE_CASE = backbone.conv_stem.convolution.weight SCREAMING_SNAKE_CASE = backbone.conv_stem.normalization.bias SCREAMING_SNAKE_CASE = backbone.conv_stem.normalization.weight SCREAMING_SNAKE_CASE = backbone.conv_stem.normalization.running_mean SCREAMING_SNAKE_CASE = backbone.conv_stem.normalization.running_var for i in range(13 ): SCREAMING_SNAKE_CASE = i + 1 SCREAMING_SNAKE_CASE = i * 2 SCREAMING_SNAKE_CASE = backbone.layer[pt_index] SCREAMING_SNAKE_CASE = F"MobilenetV1/Conv2d_{tf_index}_depthwise/" SCREAMING_SNAKE_CASE = pointer.convolution.weight SCREAMING_SNAKE_CASE = pointer.normalization.bias SCREAMING_SNAKE_CASE = pointer.normalization.weight SCREAMING_SNAKE_CASE = pointer.normalization.running_mean SCREAMING_SNAKE_CASE = pointer.normalization.running_var SCREAMING_SNAKE_CASE = backbone.layer[pt_index + 1] SCREAMING_SNAKE_CASE = F"MobilenetV1/Conv2d_{tf_index}_pointwise/" SCREAMING_SNAKE_CASE = pointer.convolution.weight SCREAMING_SNAKE_CASE = pointer.normalization.bias SCREAMING_SNAKE_CASE = pointer.normalization.weight SCREAMING_SNAKE_CASE = pointer.normalization.running_mean SCREAMING_SNAKE_CASE = pointer.normalization.running_var if isinstance(A__ , A__ ): SCREAMING_SNAKE_CASE = "MobilenetV1/Logits/Conv2d_1c_1x1/" SCREAMING_SNAKE_CASE = model.classifier.weight SCREAMING_SNAKE_CASE = model.classifier.bias return tf_to_pt_map def __a ( A__ : List[str] , A__ : Optional[int] , A__ : List[Any] ): try: import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise # Load weights from TF model SCREAMING_SNAKE_CASE = tf.train.list_variables(A__ ) SCREAMING_SNAKE_CASE = {} for name, shape in init_vars: logger.info(F"Loading TF weight {name} with shape {shape}" ) SCREAMING_SNAKE_CASE = tf.train.load_variable(A__ , A__ ) SCREAMING_SNAKE_CASE = array # Build TF to PyTorch weights loading map SCREAMING_SNAKE_CASE = _build_tf_to_pytorch_map(A__ , A__ , A__ ) for name, pointer in tf_to_pt_map.items(): logger.info(F"Importing {name}" ) if name not in tf_weights: logger.info(F"{name} not in tf pre-trained weights, skipping" ) continue SCREAMING_SNAKE_CASE = tf_weights[name] if "depthwise_weights" in name: logger.info("Transposing depthwise" ) SCREAMING_SNAKE_CASE = np.transpose(A__ , (2, 3, 0, 1) ) elif "weights" in name: logger.info("Transposing" ) if len(pointer.shape ) == 2: # copying into linear layer SCREAMING_SNAKE_CASE = array.squeeze().transpose() else: SCREAMING_SNAKE_CASE = np.transpose(A__ , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(F"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" ) logger.info(F"Initialize PyTorch weight {name} {array.shape}" ) SCREAMING_SNAKE_CASE = torch.from_numpy(A__ ) tf_weights.pop(A__ , A__ ) tf_weights.pop(name + "/RMSProp" , A__ ) tf_weights.pop(name + "/RMSProp_1" , A__ ) tf_weights.pop(name + "/ExponentialMovingAverage" , A__ ) logger.info(F"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" ) return model def __a ( A__ : torch.Tensor , A__ : nn.Convad ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = features.shape[-2:] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = conv_layer.stride SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = conv_layer.kernel_size if in_height % stride_height == 0: SCREAMING_SNAKE_CASE = max(kernel_height - stride_height , 0 ) else: SCREAMING_SNAKE_CASE = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: SCREAMING_SNAKE_CASE = max(kernel_width - stride_width , 0 ) else: SCREAMING_SNAKE_CASE = max(kernel_width - (in_width % stride_width) , 0 ) SCREAMING_SNAKE_CASE = pad_along_width // 2 SCREAMING_SNAKE_CASE = pad_along_width - pad_left SCREAMING_SNAKE_CASE = pad_along_height // 2 SCREAMING_SNAKE_CASE = pad_along_height - pad_top SCREAMING_SNAKE_CASE = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(A__ , A__ , "constant" , 0.0 ) class _SCREAMING_SNAKE_CASE ( nn.Module ): '''simple docstring''' def __init__( self : Any , __lowerCamelCase : MobileNetVaConfig , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : Optional[int] = 1 , __lowerCamelCase : Optional[int] = 1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[bool] = True , __lowerCamelCase : Optional[bool or str] = True , ): super().__init__() SCREAMING_SNAKE_CASE = config if in_channels % groups != 0: raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups." ) if out_channels % groups != 0: raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups." 
) SCREAMING_SNAKE_CASE = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) SCREAMING_SNAKE_CASE = nn.Convad( in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , kernel_size=__lowerCamelCase , stride=__lowerCamelCase , padding=__lowerCamelCase , groups=__lowerCamelCase , bias=__lowerCamelCase , padding_mode="zeros" , ) if use_normalization: SCREAMING_SNAKE_CASE = nn.BatchNormad( num_features=__lowerCamelCase , eps=config.layer_norm_eps , momentum=0.9_997 , affine=__lowerCamelCase , track_running_stats=__lowerCamelCase , ) else: SCREAMING_SNAKE_CASE = None if use_activation: if isinstance(__lowerCamelCase , __lowerCamelCase ): SCREAMING_SNAKE_CASE = ACTaFN[use_activation] elif isinstance(config.hidden_act , __lowerCamelCase ): SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act] else: SCREAMING_SNAKE_CASE = config.hidden_act else: SCREAMING_SNAKE_CASE = None def _snake_case ( self : Dict , __lowerCamelCase : torch.Tensor ): if self.config.tf_padding: SCREAMING_SNAKE_CASE = apply_tf_padding(__lowerCamelCase , self.convolution ) SCREAMING_SNAKE_CASE = self.convolution(__lowerCamelCase ) if self.normalization is not None: SCREAMING_SNAKE_CASE = self.normalization(__lowerCamelCase ) if self.activation is not None: SCREAMING_SNAKE_CASE = self.activation(__lowerCamelCase ) return features class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = MobileNetVaConfig lowerCamelCase__ = load_tf_weights_in_mobilenet_va lowerCamelCase__ = "mobilenet_v1" lowerCamelCase__ = "pixel_values" lowerCamelCase__ = False def _snake_case ( self : List[Any] , __lowerCamelCase : Union[nn.Linear, nn.Convad] ): if isinstance(__lowerCamelCase , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__lowerCamelCase , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) __A : int = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' __A : List[str] = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." 
, __snake_case , ) class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' def __init__( self : str , __lowerCamelCase : MobileNetVaConfig , __lowerCamelCase : bool = True ): super().__init__(__lowerCamelCase ) SCREAMING_SNAKE_CASE = config SCREAMING_SNAKE_CASE = 32 SCREAMING_SNAKE_CASE = max(int(depth * config.depth_multiplier ) , config.min_depth ) SCREAMING_SNAKE_CASE = MobileNetVaConvLayer( __lowerCamelCase , in_channels=config.num_channels , out_channels=__lowerCamelCase , kernel_size=3 , stride=2 , ) SCREAMING_SNAKE_CASE = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] SCREAMING_SNAKE_CASE = nn.ModuleList() for i in range(13 ): SCREAMING_SNAKE_CASE = out_channels if strides[i] == 2 or i == 0: depth *= 2 SCREAMING_SNAKE_CASE = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( __lowerCamelCase , in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , kernel_size=3 , stride=strides[i] , groups=__lowerCamelCase , ) ) self.layer.append( MobileNetVaConvLayer( __lowerCamelCase , in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , kernel_size=1 , ) ) SCREAMING_SNAKE_CASE = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def _snake_case ( self : Optional[int] , __lowerCamelCase : Dict ): raise NotImplementedError @add_start_docstrings_to_model_forward(__lowerCamelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__lowerCamelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _snake_case ( self : List[str] , __lowerCamelCase : Optional[torch.Tensor] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[bool] = None , ): SCREAMING_SNAKE_CASE = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values" ) SCREAMING_SNAKE_CASE = self.conv_stem(__lowerCamelCase ) SCREAMING_SNAKE_CASE = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): SCREAMING_SNAKE_CASE = layer_module(__lowerCamelCase ) if output_hidden_states: SCREAMING_SNAKE_CASE = all_hidden_states + (hidden_states,) SCREAMING_SNAKE_CASE = hidden_states if self.pooler is not None: SCREAMING_SNAKE_CASE = torch.flatten(self.pooler(__lowerCamelCase ) , start_dim=1 ) else: SCREAMING_SNAKE_CASE = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__lowerCamelCase , pooler_output=__lowerCamelCase , hidden_states=__lowerCamelCase , ) @add_start_docstrings( "\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , __snake_case , ) class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' def __init__( self : Optional[Any] , __lowerCamelCase : MobileNetVaConfig ): super().__init__(__lowerCamelCase ) SCREAMING_SNAKE_CASE = config.num_labels SCREAMING_SNAKE_CASE = MobileNetVaModel(__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head SCREAMING_SNAKE_CASE = nn.Dropout(config.classifier_dropout_prob , inplace=__lowerCamelCase ) SCREAMING_SNAKE_CASE = nn.Linear(__lowerCamelCase , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__lowerCamelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__lowerCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _snake_case ( self : Optional[int] , __lowerCamelCase : Optional[torch.Tensor] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[torch.Tensor] = None , __lowerCamelCase : Optional[bool] = None , ): SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict SCREAMING_SNAKE_CASE = self.mobilenet_va(__lowerCamelCase , output_hidden_states=__lowerCamelCase , return_dict=__lowerCamelCase ) SCREAMING_SNAKE_CASE = outputs.pooler_output if return_dict else outputs[1] SCREAMING_SNAKE_CASE = self.classifier(self.dropout(__lowerCamelCase ) ) SCREAMING_SNAKE_CASE = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: SCREAMING_SNAKE_CASE = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): SCREAMING_SNAKE_CASE = "single_label_classification" else: SCREAMING_SNAKE_CASE = "multi_label_classification" if self.config.problem_type == "regression": SCREAMING_SNAKE_CASE = MSELoss() if self.num_labels == 1: SCREAMING_SNAKE_CASE = loss_fct(logits.squeeze() , labels.squeeze() ) else: SCREAMING_SNAKE_CASE = loss_fct(__lowerCamelCase , __lowerCamelCase ) elif self.config.problem_type == "single_label_classification": SCREAMING_SNAKE_CASE = CrossEntropyLoss() SCREAMING_SNAKE_CASE = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": SCREAMING_SNAKE_CASE = BCEWithLogitsLoss() SCREAMING_SNAKE_CASE = loss_fct(__lowerCamelCase , __lowerCamelCase ) if not return_dict: SCREAMING_SNAKE_CASE = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=__lowerCamelCase , logits=__lowerCamelCase , hidden_states=outputs.hidden_states , )
16
'''simple docstring'''

# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class __A ( UpperCamelCase__ ):
    a__ : List[str] = """Salesforce/blip-image-captioning-base"""
    a__ : Optional[Any] = (
        """This is a tool that generates a description of an image. It takes an input named `image` which should be the """
        """image to caption, and returns a text that contains the description in English."""
    )
    a__ : str = """image_captioner"""
    a__ : List[str] = AutoModelForVisionaSeq
    a__ : int = ["""image"""]
    a__ : Optional[Any] = ["""text"""]

    def __init__(self : Any , *__a : Dict , **__a : Union[str, Any] ):
        requires_backends(self , ["vision"] )
        super().__init__(*__a , **__a )

    def _lowercase (self : Union[str, Any] , __a : "Image" ):
        return self.pre_processor(images=__a , return_tensors="pt" )

    def _lowercase (self : List[str] , __a : Dict ):
        return self.model.generate(**__a )

    def _lowercase (self : int , __a : Optional[Any] ):
        return self.pre_processor.batch_decode(__a , skip_special_tokens=__a )[0].strip()
78
0
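# A small re-derivation of the TensorFlow-style "SAME" padding computed by
# `apply_tf_padding` in the MobileNetV1 row above; `same_padding_1d` is an
# illustrative name, not from the row. Each spatial dimension is padded
# independently so the strided conv output has length ceil(in_size / stride).
def same_padding_1d(in_size: int, stride: int, kernel: int) -> tuple[int, int]:
    # Total padding required along one dimension, split (before, after).
    if in_size % stride == 0:
        pad_along = max(kernel - stride, 0)
    else:
        pad_along = max(kernel - (in_size % stride), 0)
    before = pad_along // 2
    return before, pad_along - before


print(same_padding_1d(7, 2, 3))  # (1, 1): output length ceil(7 / 2) = 4
print(same_padding_1d(8, 2, 3))  # (0, 1): output length ceil(8 / 2) = 4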
def prime_sieve_eratosthenes(num: int) -> list[int]:
    if num <= 0:
        raise ValueError("""Input must be a positive integer""")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_num = int(input('''Enter a positive integer: ''').strip())
    print(prime_sieve_eratosthenes(user_num))
17
'''simple docstring''' import logging import math from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from .tensor_utils import tensor_tree_map, tree_map def lowerCAmelCase_ ( snake_case_ : Union[dict, list, tuple, torch.Tensor] ) -> List[Tuple[int, ...]]: '''simple docstring''' UpperCAmelCase_ = [] if isinstance(snake_case_ , snake_case_ ): for v in tree.values(): shapes.extend(_fetch_dims(snake_case_ ) ) elif isinstance(snake_case_ , (list, tuple) ): for t in tree: shapes.extend(_fetch_dims(snake_case_ ) ) elif isinstance(snake_case_ , torch.Tensor ): shapes.append(tree.shape ) else: raise ValueError("Not supported" ) return shapes @torch.jit.ignore def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Tuple[int, ...] ) -> Tuple[int, ...]: '''simple docstring''' UpperCAmelCase_ = [] for d in reversed(snake_case_ ): idx.append(flat_idx % d ) UpperCAmelCase_ = flat_idx // d return tuple(reversed(snake_case_ ) ) @torch.jit.ignore def lowerCAmelCase_ ( snake_case_ : Sequence[int] , snake_case_ : Sequence[int] , snake_case_ : Sequence[int] , snake_case_ : Optional[Sequence[bool]] = None , snake_case_ : Optional[Sequence[bool]] = None , ) -> List[Tuple[slice, ...]]: '''simple docstring''' def reduce_edge_list(snake_case_ : List[bool] ) -> None: UpperCAmelCase_ = True for i in range(len(snake_case_ ) ): UpperCAmelCase_ = -1 * (i + 1) l[reversed_idx] &= tally UpperCAmelCase_ = l[reversed_idx] if start_edges is None: UpperCAmelCase_ = [s == 0 for s in start] reduce_edge_list(snake_case_ ) if end_edges is None: UpperCAmelCase_ = [e == (d - 1) for e, d in zip(snake_case_ , snake_case_ )] reduce_edge_list(snake_case_ ) # Base cases. Either start/end are empty and we're done, or the final, # one-dimensional tensor can be simply sliced if len(snake_case_ ) == 0: return [()] elif len(snake_case_ ) == 1: return [(slice(start[0] , end[0] + 1 ),)] UpperCAmelCase_ = [] UpperCAmelCase_ = [] # Dimensions common to start and end can be selected directly for s, e in zip(snake_case_ , snake_case_ ): if s == e: path_list.append(slice(snake_case_ , s + 1 ) ) else: break UpperCAmelCase_ = tuple(snake_case_ ) UpperCAmelCase_ = len(snake_case_ ) # start == end, and we're done if divergence_idx == len(snake_case_ ): return [path] def upper() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None UpperCAmelCase_ = start[divergence_idx] return tuple( path + (slice(snake_case_ , sdi + 1 ),) + s for s in _get_minimal_slice_set( start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) ) def lower() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None UpperCAmelCase_ = end[divergence_idx] return tuple( path + (slice(snake_case_ , edi + 1 ),) + s for s in _get_minimal_slice_set( [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) ) # If both start and end are at the edges of the subtree rooted at # divergence_idx, we can just select the whole subtree at once if start_edges[divergence_idx] and end_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) ) # If just start is at the edge, we can grab almost all 
of the subtree, # treating only the ragged bottom edge as an edge case elif start_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) ) slices.extend(lower() ) # Analogous to the previous case, but the top is ragged this time elif end_edges[divergence_idx]: slices.extend(upper() ) slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) ) # If both sides of the range are ragged, we need to handle both sides # separately. If there's contiguous meat in between them, we can index it # in one big chunk else: slices.extend(upper() ) UpperCAmelCase_ = end[divergence_idx] - start[divergence_idx] if middle_ground > 1: slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) ) slices.extend(lower() ) return slices @torch.jit.ignore def lowerCAmelCase_ ( snake_case_ : torch.Tensor , snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> torch.Tensor: '''simple docstring''' UpperCAmelCase_ = t.shape[:no_batch_dims] UpperCAmelCase_ = list(_flat_idx_to_idx(snake_case_ , snake_case_ ) ) # _get_minimal_slice_set is inclusive UpperCAmelCase_ = list(_flat_idx_to_idx(flat_end - 1 , snake_case_ ) ) # Get an ordered list of slices to perform UpperCAmelCase_ = _get_minimal_slice_set( snake_case_ , snake_case_ , snake_case_ , ) UpperCAmelCase_ = [t[s] for s in slices] return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] ) def lowerCAmelCase_ ( snake_case_ : Callable , snake_case_ : Dict[str, Any] , snake_case_ : int , snake_case_ : int , snake_case_ : bool = False , snake_case_ : Any = None , snake_case_ : bool = False , ) -> Any: '''simple docstring''' if not (len(snake_case_ ) > 0): raise ValueError("Must provide at least one input" ) UpperCAmelCase_ = [shape[:no_batch_dims] for shape in _fetch_dims(snake_case_ )] UpperCAmelCase_ = tuple([max(snake_case_ ) for s in zip(*snake_case_ )] ) def _prep_inputs(snake_case_ : torch.Tensor ) -> torch.Tensor: if not low_mem: if not sum(t.shape[:no_batch_dims] ) == no_batch_dims: UpperCAmelCase_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) UpperCAmelCase_ = t.reshape(-1 , *t.shape[no_batch_dims:] ) else: UpperCAmelCase_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) return t UpperCAmelCase_ = tensor_tree_map(_prep_inputs , snake_case_ ) UpperCAmelCase_ = None if _out is not None: UpperCAmelCase_ = tensor_tree_map(lambda snake_case_ : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out ) UpperCAmelCase_ = 1 for d in orig_batch_dims: flat_batch_dim *= d UpperCAmelCase_ = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) def _select_chunk(snake_case_ : torch.Tensor ) -> torch.Tensor: return t[i : i + chunk_size] if t.shape[0] != 1 else t UpperCAmelCase_ = 0 UpperCAmelCase_ = prepped_outputs for _ in range(snake_case_ ): # Chunk the input if not low_mem: UpperCAmelCase_ = _select_chunk else: UpperCAmelCase_ = partial( _chunk_slice , flat_start=snake_case_ , flat_end=min(snake_case_ , i + chunk_size ) , no_batch_dims=len(snake_case_ ) , ) UpperCAmelCase_ = tensor_tree_map(snake_case_ , snake_case_ ) # Run the layer on the chunk UpperCAmelCase_ = layer(**snake_case_ ) # Allocate space for the output if out is None: UpperCAmelCase_ = tensor_tree_map(lambda snake_case_ : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , snake_case_ ) # Put the chunk in its pre-allocated space if isinstance(snake_case_ , snake_case_ ): def assign(snake_case_ : dict , snake_case_ : dict ) -> None: for k, v in da.items(): if 
isinstance(snake_case_ , snake_case_ ): assign(snake_case_ , da[k] ) else: if _add_into_out: v[i : i + chunk_size] += da[k] else: UpperCAmelCase_ = da[k] assign(snake_case_ , snake_case_ ) elif isinstance(snake_case_ , snake_case_ ): for xa, xa in zip(snake_case_ , snake_case_ ): if _add_into_out: xa[i : i + chunk_size] += xa else: UpperCAmelCase_ = xa elif isinstance(snake_case_ , torch.Tensor ): if _add_into_out: out[i : i + chunk_size] += output_chunk else: UpperCAmelCase_ = output_chunk else: raise ValueError("Not supported" ) i += chunk_size UpperCAmelCase_ = tensor_tree_map(lambda snake_case_ : t.view(orig_batch_dims + t.shape[1:] ) , snake_case_ ) return out class __A : def __init__(self : Dict , __a : int = 512 , ): UpperCAmelCase_ = max_chunk_size UpperCAmelCase_ = None UpperCAmelCase_ = None def _lowercase (self : List[Any] , __a : Callable , __a : tuple , __a : int ): logging.info("Tuning chunk size..." ) if min_chunk_size >= self.max_chunk_size: return min_chunk_size UpperCAmelCase_ = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )] UpperCAmelCase_ = [c for c in candidates if c > min_chunk_size] UpperCAmelCase_ = [min_chunk_size] + candidates candidates[-1] += 4 def test_chunk_size(__a : int ) -> bool: try: with torch.no_grad(): fn(*__a , chunk_size=__a ) return True except RuntimeError: return False UpperCAmelCase_ = 0 UpperCAmelCase_ = len(__a ) - 1 while i > min_viable_chunk_size_index: UpperCAmelCase_ = test_chunk_size(candidates[i] ) if not viable: UpperCAmelCase_ = (min_viable_chunk_size_index + i) // 2 else: UpperCAmelCase_ = i UpperCAmelCase_ = (i + len(__a ) - 1) // 2 return candidates[min_viable_chunk_size_index] def _lowercase (self : int , __a : Iterable , __a : Iterable ): UpperCAmelCase_ = True for aa, aa in zip(__a , __a ): assert type(__a ) == type(__a ) if isinstance(__a , (list, tuple) ): consistent &= self._compare_arg_caches(__a , __a ) elif isinstance(__a , __a ): UpperCAmelCase_ = [v for _, v in sorted(aa.items() , key=lambda __a : x[0] )] UpperCAmelCase_ = [v for _, v in sorted(aa.items() , key=lambda __a : x[0] )] consistent &= self._compare_arg_caches(__a , __a ) else: consistent &= aa == aa return consistent def _lowercase (self : List[str] , __a : Callable , __a : tuple , __a : int , ): UpperCAmelCase_ = True UpperCAmelCase_ = tree_map(lambda __a : a.shape if isinstance(__a , torch.Tensor ) else a , __a , __a ) if self.cached_arg_data is not None: # If args have changed shape/value, we need to re-tune assert len(self.cached_arg_data ) == len(__a ) UpperCAmelCase_ = self._compare_arg_caches(self.cached_arg_data , __a ) else: # Otherwise, we can reuse the precomputed value UpperCAmelCase_ = False if not consistent: UpperCAmelCase_ = self._determine_favorable_chunk_size( __a , __a , __a , ) UpperCAmelCase_ = arg_data assert self.cached_chunk_size is not None return self.cached_chunk_size
78
0
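# The chunking utilities in the row above slice the flattened batch dimension
# and apply a layer chunk by chunk to bound peak memory. A toy version of that
# core loop; `chunked_apply` is an illustrative name, and the row's real
# `chunk_layer` additionally handles nested inputs, broadcasting, and
# preallocated / in-place outputs, which this sketch omits.
import torch


def chunked_apply(fn, x: torch.Tensor, chunk_size: int) -> torch.Tensor:
    outputs = []
    for start in range(0, x.shape[0], chunk_size):
        outputs.append(fn(x[start : start + chunk_size]))
    return torch.cat(outputs, dim=0)


x = torch.randn(10, 4)
assert torch.equal(chunked_apply(torch.relu, x, chunk_size=3), torch.relu(x))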
'''simple docstring''' import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class lowerCAmelCase_ ( unittest.TestCase ): def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase ) _lowerCAmelCase = -1 _lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase ) _lowerCAmelCase = model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase ) _lowerCAmelCase = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: _lowerCAmelCase = TextStreamer(_lowerCAmelCase ) model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase , streamer=_lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCAmelCase = cs.out[:-1] self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase ) _lowerCAmelCase = -1 _lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase ) _lowerCAmelCase = model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase ) _lowerCAmelCase = tokenizer.decode(greedy_ids[0] ) _lowerCAmelCase = TextIteratorStreamer(_lowerCAmelCase ) _lowerCAmelCase = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} _lowerCAmelCase = Thread(target=model.generate , kwargs=_lowerCAmelCase ) thread.start() _lowerCAmelCase = "" for new_text in streamer: streamer_text += new_text self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) def _snake_case ( self ) -> List[str]: _lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase ) _lowerCAmelCase = -1 _lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase ) _lowerCAmelCase = model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase ) _lowerCAmelCase = greedy_ids[:, input_ids.shape[1] :] _lowerCAmelCase = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: _lowerCAmelCase = TextStreamer(_lowerCAmelCase , skip_prompt=_lowerCAmelCase ) model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase , streamer=_lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCAmelCase = cs.out[:-1] self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) def _snake_case ( self ) -> Dict: # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them _lowerCAmelCase = AutoTokenizer.from_pretrained("distilgpt2" ) _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(_lowerCAmelCase ) _lowerCAmelCase = -1 _lowerCAmelCase = torch.ones((1, 5) , device=_lowerCAmelCase ).long() * model.config.bos_token_id with CaptureStdout() as cs: _lowerCAmelCase = TextStreamer(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase ) model.generate(_lowerCAmelCase , max_new_tokens=1 , do_sample=_lowerCAmelCase , streamer=_lowerCAmelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _lowerCAmelCase = cs.out[:-1] # Remove the final "\n" _lowerCAmelCase = tokenizer(_lowerCAmelCase , return_tensors="pt" ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase ) _lowerCAmelCase = -1 _lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase ) _lowerCAmelCase = TextIteratorStreamer(_lowerCAmelCase , timeout=0.001 ) _lowerCAmelCase = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} _lowerCAmelCase = Thread(target=model.generate , kwargs=_lowerCAmelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_lowerCAmelCase ): _lowerCAmelCase = "" for new_text in streamer: streamer_text += new_text
18
'''simple docstring'''

import copy
import re


class __A :
    a__ : Optional[int] = """hp"""
    a__ : Optional[Any] = {}
    a__ : List[Any] = None

    @classmethod
    def _lowercase (cls : Optional[int] , __a : str , __a : Tuple ):
        UpperCAmelCase_ = prefix
        UpperCAmelCase_ = defaults
        cls.build_naming_info()

    @staticmethod
    def _lowercase (__a : List[Any] , __a : List[str] ):
        if len(__a ) == 0:
            return ""
        UpperCAmelCase_ = None
        if any(char.isdigit() for char in word ):
            raise Exception(f"""Parameters should not contain numbers: '{word}' contains a number""" )
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1 , len(__a ) + 1 ):
            UpperCAmelCase_ = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                UpperCAmelCase_ = prefix
                break
        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(__a : Union[str, Any] ):
                UpperCAmelCase_ = ""
                while integer != 0:
                    UpperCAmelCase_ = chr(ord("A" ) + integer % 10 ) + s
                    integer //= 10
                return s

            UpperCAmelCase_ = 0
            while True:
                UpperCAmelCase_ = word + "#" + int_to_alphabetic(__a )
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    UpperCAmelCase_ = sword
                    break
        UpperCAmelCase_ = short_word
        UpperCAmelCase_ = word
        return short_word

    @staticmethod
    def _lowercase (__a : List[str] , __a : Union[str, Any] ):
        UpperCAmelCase_ = param_name.split("_" )
        UpperCAmelCase_ = [TrialShortNamer.shortname_for_word(__a , __a ) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        UpperCAmelCase_ = ["", "_"]
        for separator in separators:
            UpperCAmelCase_ = separator.join(__a )
            if shortname not in info["reverse_short_param"]:
                UpperCAmelCase_ = shortname
                UpperCAmelCase_ = param_name
                return shortname
        return param_name

    @staticmethod
    def _lowercase (__a : int , __a : Union[str, Any] ):
        UpperCAmelCase_ = TrialShortNamer.shortname_for_key(__a , __a )
        UpperCAmelCase_ = short_name
        UpperCAmelCase_ = param_name

    @classmethod
    def _lowercase (cls : Any ):
        if cls.NAMING_INFO is not None:
            return
        UpperCAmelCase_ = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        UpperCAmelCase_ = list(cls.DEFAULTS.keys() )
        for k in field_keys:
            cls.add_new_param_name(__a , __a )
        UpperCAmelCase_ = info

    @classmethod
    def _lowercase (cls : int , __a : Optional[int] ):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        UpperCAmelCase_ = [copy.copy(cls.PREFIX )]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"""You should provide a default value for the param name {k} with value {v}""" )
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            UpperCAmelCase_ = cls.NAMING_INFO["short_param"][k]
            if isinstance(__a , __a ):
                UpperCAmelCase_ = 1 if v else 0
            UpperCAmelCase_ = "" if isinstance(__a , (int, float) ) else "-"
            UpperCAmelCase_ = f"""{key}{sep}{v}"""
            name.append(__a )
        return "_".join(__a )

    @classmethod
    def _lowercase (cls : Dict , __a : Dict ):
        UpperCAmelCase_ = repr[len(cls.PREFIX ) + 1 :]
        if repr == "":
            UpperCAmelCase_ = []
        else:
            UpperCAmelCase_ = repr.split("_" )
        UpperCAmelCase_ = {}
        for value in values:
            if "-" in value:
                UpperCAmelCase_ , UpperCAmelCase_ = value.split("-" )
            else:
                UpperCAmelCase_ = re.sub("[0-9.]" , "" , __a )
                UpperCAmelCase_ = float(re.sub("[^0-9.]" , "" , __a ) )
            UpperCAmelCase_ = cls.NAMING_INFO["reverse_short_param"][p_k]
            UpperCAmelCase_ = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                UpperCAmelCase_ = cls.DEFAULTS[k]
        return parameters
78
0
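# The tests in the row above exercise `TextIteratorStreamer`; the consumer
# pattern they rely on looks roughly like this (same tiny test checkpoint as
# in the row; the prompt and generation settings here are illustrative).
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tokenizer("Hello", return_tensors="pt")

streamer = TextIteratorStreamer(tokenizer)
thread = Thread(
    target=model.generate,
    kwargs={**inputs, "max_new_tokens": 10, "do_sample": False, "streamer": streamer},
)
thread.start()
for new_text in streamer:  # yields decoded text pieces as tokens are generated
    print(new_text, end="")
thread.join()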
"""simple docstring""" import os from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home _a = HUGGINGFACE_HUB_CACHE _a = """config.json""" _a = """diffusion_pytorch_model.bin""" _a = """diffusion_flax_model.msgpack""" _a = """model.onnx""" _a = """diffusion_pytorch_model.safetensors""" _a = """weights.pb""" _a = """https://huggingface.co""" _a = default_cache_path _a = """diffusers_modules""" _a = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules""")) _a = ["""fp16""", """non-ema"""] _a = """.self_attn"""
19
'''simple docstring'''

from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


SCREAMING_SNAKE_CASE_: int =logging.get_logger(__name__)


class __A ( UpperCamelCase__ ):
    a__ : Tuple = ["""pixel_values"""]

    def __init__(self : int , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : int = 8 , **__a : int , ):
        super().__init__(**__a )
        UpperCAmelCase_ = do_rescale
        UpperCAmelCase_ = rescale_factor
        UpperCAmelCase_ = do_pad
        UpperCAmelCase_ = pad_size

    def _lowercase (self : Optional[int] , __a : np.ndarray , __a : float , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[int] ):
        return rescale(__a , scale=__a , data_format=__a , **__a )

    def _lowercase (self : Optional[int] , __a : np.ndarray , __a : int , __a : Optional[Union[str, ChannelDimension]] = None ):
        UpperCAmelCase_ , UpperCAmelCase_ = get_image_size(__a )
        UpperCAmelCase_ = (old_height // size + 1) * size - old_height
        UpperCAmelCase_ = (old_width // size + 1) * size - old_width
        return pad(__a , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=__a )

    def _lowercase (self : Tuple , __a : ImageInput , __a : Optional[bool] = None , __a : Optional[float] = None , __a : Optional[bool] = None , __a : Optional[int] = None , __a : Optional[Union[str, TensorType]] = None , __a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__a : List[str] , ):
        UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
        UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
        UpperCAmelCase_ = do_pad if do_pad is not None else self.do_pad
        UpperCAmelCase_ = pad_size if pad_size is not None else self.pad_size
        UpperCAmelCase_ = make_list_of_images(__a )
        if not valid_images(__a ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        # All transformations expect numpy arrays.
        UpperCAmelCase_ = [to_numpy_array(__a ) for image in images]
        if do_rescale:
            UpperCAmelCase_ = [self.rescale(image=__a , scale=__a ) for image in images]
        if do_pad:
            UpperCAmelCase_ = [self.pad(__a , size=__a ) for image in images]
        UpperCAmelCase_ = [to_channel_dimension_format(__a , __a ) for image in images]
        UpperCAmelCase_ = {"pixel_values": images}
        return BatchFeature(data=__a , tensor_type=__a )
78
0
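# The constants row above derives its module cache from `hf_cache_home` with an
# environment override; to the best of my knowledge the upstream resolution
# rule is equivalent to the following (printed paths depend on the local
# environment, so the comment value is only an example).
import os

hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
hf_modules_cache = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
print(hf_modules_cache)  # e.g. ~/.cache/huggingface/modules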
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609_344,
    "knot": 1.852,
}

speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277_777_778,
    "mph": 0.621_371_192,
    "knot": 0.539_956_803,
}


def _lowercase(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        a__ = (
            f"""Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"""
            f"""Valid values are: {', '.join(speed_chart_inverse)}"""
        )
        raise ValueError(a__)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
20
'''simple docstring''' import argparse import os.path as osp import re import torch from safetensors.torch import load_file, save_file # =================# # UNet Conversion # # =================# SCREAMING_SNAKE_CASE_: Dict =[ # (stable-diffusion, HF Diffusers) ('time_embed.0.weight', 'time_embedding.linear_1.weight'), ('time_embed.0.bias', 'time_embedding.linear_1.bias'), ('time_embed.2.weight', 'time_embedding.linear_2.weight'), ('time_embed.2.bias', 'time_embedding.linear_2.bias'), ('input_blocks.0.0.weight', 'conv_in.weight'), ('input_blocks.0.0.bias', 'conv_in.bias'), ('out.0.weight', 'conv_norm_out.weight'), ('out.0.bias', 'conv_norm_out.bias'), ('out.2.weight', 'conv_out.weight'), ('out.2.bias', 'conv_out.bias'), ] SCREAMING_SNAKE_CASE_: List[Any] =[ # (stable-diffusion, HF Diffusers) ('in_layers.0', 'norm1'), ('in_layers.2', 'conv1'), ('out_layers.0', 'norm2'), ('out_layers.3', 'conv2'), ('emb_layers.1', 'time_emb_proj'), ('skip_connection', 'conv_shortcut'), ] SCREAMING_SNAKE_CASE_: Union[str, Any] =[] # hardcoded number of downblocks and resnets/attentions... # would need smarter logic for other networks. for i in range(4): # loop over downblocks/upblocks for j in range(2): # loop over resnets/attentions for downblocks SCREAMING_SNAKE_CASE_: Any =f"down_blocks.{i}.resnets.{j}." SCREAMING_SNAKE_CASE_: Tuple =f"input_blocks.{3*i + j + 1}.0." unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix)) if i < 3: # no attention layers in down_blocks.3 SCREAMING_SNAKE_CASE_: Optional[Any] =f"down_blocks.{i}.attentions.{j}." SCREAMING_SNAKE_CASE_: List[str] =f"input_blocks.{3*i + j + 1}.1." unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix)) for j in range(3): # loop over resnets/attentions for upblocks SCREAMING_SNAKE_CASE_: Union[str, Any] =f"up_blocks.{i}.resnets.{j}." SCREAMING_SNAKE_CASE_: Any =f"output_blocks.{3*i + j}.0." unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix)) if i > 0: # no attention layers in up_blocks.0 SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.attentions.{j}." SCREAMING_SNAKE_CASE_: Optional[int] =f"output_blocks.{3*i + j}.1." unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix)) if i < 3: # no downsample in down_blocks.3 SCREAMING_SNAKE_CASE_: Union[str, Any] =f"down_blocks.{i}.downsamplers.0.conv." SCREAMING_SNAKE_CASE_: Union[str, Any] =f"input_blocks.{3*(i+1)}.0.op." unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix)) # no upsample in up_blocks.3 SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.upsamplers.0." SCREAMING_SNAKE_CASE_: List[Any] =f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}." unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix)) SCREAMING_SNAKE_CASE_: int ='mid_block.attentions.0.' SCREAMING_SNAKE_CASE_: List[Any] ='middle_block.1.' unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) for j in range(2): SCREAMING_SNAKE_CASE_: Tuple =f"mid_block.resnets.{j}." SCREAMING_SNAKE_CASE_: Tuple =f"middle_block.{2*j}." 
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix)) def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = {k: k for k in unet_state_dict.keys()} for sd_name, hf_name in unet_conversion_map: UpperCAmelCase_ = sd_name for k, v in mapping.items(): if "resnets" in k: for sd_part, hf_part in unet_conversion_map_resnet: UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ ) UpperCAmelCase_ = v for k, v in mapping.items(): for sd_part, hf_part in unet_conversion_map_layer: UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ ) UpperCAmelCase_ = v UpperCAmelCase_ = {v: unet_state_dict[k] for k, v in mapping.items()} return new_state_dict # ================# # VAE Conversion # # ================# SCREAMING_SNAKE_CASE_: int =[ # (stable-diffusion, HF Diffusers) ('nin_shortcut', 'conv_shortcut'), ('norm_out', 'conv_norm_out'), ('mid.attn_1.', 'mid_block.attentions.0.'), ] for i in range(4): # down_blocks have two resnets for j in range(2): SCREAMING_SNAKE_CASE_: Tuple =f"encoder.down_blocks.{i}.resnets.{j}." SCREAMING_SNAKE_CASE_: int =f"encoder.down.{i}.block.{j}." vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) if i < 3: SCREAMING_SNAKE_CASE_: int =f"down_blocks.{i}.downsamplers.0." SCREAMING_SNAKE_CASE_: str =f"down.{i}.downsample." vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix)) SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.upsamplers.0." SCREAMING_SNAKE_CASE_: List[str] =f"up.{3-i}.upsample." vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) # up_blocks have three resnets # also, up blocks in hf are numbered in reverse from sd for j in range(3): SCREAMING_SNAKE_CASE_: List[str] =f"decoder.up_blocks.{i}.resnets.{j}." SCREAMING_SNAKE_CASE_: Dict =f"decoder.up.{3-i}.block.{j}." vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) # this part accounts for mid blocks in both the encoder and the decoder for i in range(2): SCREAMING_SNAKE_CASE_: Any =f"mid_block.resnets.{i}." SCREAMING_SNAKE_CASE_: Tuple =f"mid.block_{i+1}." 
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) SCREAMING_SNAKE_CASE_: int =[ # (stable-diffusion, HF Diffusers) ('norm.', 'group_norm.'), ('q.', 'query.'), ('k.', 'key.'), ('v.', 'value.'), ('proj_out.', 'proj_attn.'), ] def lowerCAmelCase_ ( snake_case_ : Tuple ) -> Tuple: '''simple docstring''' return w.reshape(*w.shape , 1 , 1 ) def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ = {k: k for k in vae_state_dict.keys()} for k, v in mapping.items(): for sd_part, hf_part in vae_conversion_map: UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ ) UpperCAmelCase_ = v for k, v in mapping.items(): if "attentions" in k: for sd_part, hf_part in vae_conversion_map_attn: UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ ) UpperCAmelCase_ = v UpperCAmelCase_ = {v: vae_state_dict[k] for k, v in mapping.items()} UpperCAmelCase_ = ["q", "k", "v", "proj_out"] for k, v in new_state_dict.items(): for weight_name in weights_to_convert: if f"""mid.attn_1.{weight_name}.weight""" in k: print(f"""Reshaping {k} for SD format""" ) UpperCAmelCase_ = reshape_weight_for_sd(snake_case_ ) return new_state_dict # =========================# # Text Encoder Conversion # # =========================# SCREAMING_SNAKE_CASE_: List[Any] =[ # (stable-diffusion, HF Diffusers) ('resblocks.', 'text_model.encoder.layers.'), ('ln_1', 'layer_norm1'), ('ln_2', 'layer_norm2'), ('.c_fc.', '.fc1.'), ('.c_proj.', '.fc2.'), ('.attn', '.self_attn'), ('ln_final.', 'transformer.text_model.final_layer_norm.'), ('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'), ('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'), ] SCREAMING_SNAKE_CASE_: Dict ={re.escape(x[1]): x[0] for x in textenc_conversion_lst} SCREAMING_SNAKE_CASE_: str =re.compile('|'.join(protected.keys())) # Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp SCREAMING_SNAKE_CASE_: List[Any] ={'q': 0, 'k': 1, 'v': 2} def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Tuple: '''simple docstring''' UpperCAmelCase_ = {} UpperCAmelCase_ = {} UpperCAmelCase_ = {} for k, v in text_enc_dict.items(): if ( k.endswith(".self_attn.q_proj.weight" ) or k.endswith(".self_attn.k_proj.weight" ) or k.endswith(".self_attn.v_proj.weight" ) ): UpperCAmelCase_ = k[: -len(".q_proj.weight" )] UpperCAmelCase_ = k[-len("q_proj.weight" )] if k_pre not in capture_qkv_weight: UpperCAmelCase_ = [None, None, None] UpperCAmelCase_ = v continue if ( k.endswith(".self_attn.q_proj.bias" ) or k.endswith(".self_attn.k_proj.bias" ) or k.endswith(".self_attn.v_proj.bias" ) ): UpperCAmelCase_ = k[: -len(".q_proj.bias" )] UpperCAmelCase_ = k[-len("q_proj.bias" )] if k_pre not in capture_qkv_bias: UpperCAmelCase_ = [None, None, None] UpperCAmelCase_ = v continue UpperCAmelCase_ = textenc_pattern.sub(lambda snake_case_ : protected[re.escape(m.group(0 ) )] , snake_case_ ) UpperCAmelCase_ = v for k_pre, tensors in capture_qkv_weight.items(): if None in tensors: raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" ) UpperCAmelCase_ = textenc_pattern.sub(lambda snake_case_ : protected[re.escape(m.group(0 ) )] , snake_case_ ) UpperCAmelCase_ = torch.cat(snake_case_ ) for k_pre, tensors in capture_qkv_bias.items(): if None in tensors: raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" ) UpperCAmelCase_ = textenc_pattern.sub(lambda snake_case_ : 
protected[re.escape(m.group(0 ) )] , snake_case_ ) UpperCAmelCase_ = torch.cat(snake_case_ ) return new_state_dict def lowerCAmelCase_ ( snake_case_ : List[Any] ) -> Union[str, Any]: '''simple docstring''' return text_enc_dict if __name__ == "__main__": SCREAMING_SNAKE_CASE_: str =argparse.ArgumentParser() parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.') parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--half', action='store_true', help='Save weights in half precision.') parser.add_argument( '--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.' ) SCREAMING_SNAKE_CASE_: Dict =parser.parse_args() assert args.model_path is not None, "Must provide a model path!" assert args.checkpoint_path is not None, "Must provide a checkpoint path!" # Path for safetensors SCREAMING_SNAKE_CASE_: Any =osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors') SCREAMING_SNAKE_CASE_: Dict =osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors') SCREAMING_SNAKE_CASE_: Union[str, Any] =osp.join(args.model_path, 'text_encoder', 'model.safetensors') # Load models from safetensors if it exists, if it doesn't pytorch if osp.exists(unet_path): SCREAMING_SNAKE_CASE_: Union[str, Any] =load_file(unet_path, device='cpu') else: SCREAMING_SNAKE_CASE_: int =osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin') SCREAMING_SNAKE_CASE_: Dict =torch.load(unet_path, map_location='cpu') if osp.exists(vae_path): SCREAMING_SNAKE_CASE_: Tuple =load_file(vae_path, device='cpu') else: SCREAMING_SNAKE_CASE_: List[Any] =osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin') SCREAMING_SNAKE_CASE_: str =torch.load(vae_path, map_location='cpu') if osp.exists(text_enc_path): SCREAMING_SNAKE_CASE_: Tuple =load_file(text_enc_path, device='cpu') else: SCREAMING_SNAKE_CASE_: List[Any] =osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin') SCREAMING_SNAKE_CASE_: Any =torch.load(text_enc_path, map_location='cpu') # Convert the UNet model SCREAMING_SNAKE_CASE_: List[Any] =convert_unet_state_dict(unet_state_dict) SCREAMING_SNAKE_CASE_: Any ={'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()} # Convert the VAE model SCREAMING_SNAKE_CASE_: List[Any] =convert_vae_state_dict(vae_state_dict) SCREAMING_SNAKE_CASE_: Dict ={'first_stage_model.' + k: v for k, v in vae_state_dict.items()} # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper SCREAMING_SNAKE_CASE_: Dict ='text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict if is_vaa_model: # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm SCREAMING_SNAKE_CASE_: Any ={'transformer.' + k: v for k, v in text_enc_dict.items()} SCREAMING_SNAKE_CASE_: str =convert_text_enc_state_dict_vaa(text_enc_dict) SCREAMING_SNAKE_CASE_: int ={'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()} else: SCREAMING_SNAKE_CASE_: str =convert_text_enc_state_dict(text_enc_dict) SCREAMING_SNAKE_CASE_: Optional[int] ={'cond_stage_model.transformer.' 
+ k: v for k, v in text_enc_dict.items()} # Put together new checkpoint SCREAMING_SNAKE_CASE_: List[str] ={**unet_state_dict, **vae_state_dict, **text_enc_dict} if args.half: SCREAMING_SNAKE_CASE_: List[str] ={k: v.half() for k, v in state_dict.items()} if args.use_safetensors: save_file(state_dict, args.checkpoint_path) else: SCREAMING_SNAKE_CASE_: str ={'state_dict': state_dict} torch.save(state_dict, args.checkpoint_path)
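The whole conversion above reduces to renaming state-dict keys through substring maps and reshaping a few attention weights; below is a toy, self-contained sketch of just the renaming step. The map and keys here are illustrative stand-ins, not the real SD/Diffusers tables.

# Toy substring map in (source, target) order; the real tables are the
# vae_conversion_map / textenc_conversion_lst pairs above.
conversion_map = [("norm.", "group_norm."), ("q.", "query.")]


def rename_keys(state_dict: dict) -> dict:
    # Rewrite every key by applying each substring substitution in order.
    renamed = {}
    for key, value in state_dict.items():
        for src, dst in conversion_map:
            key = key.replace(src, dst)
        renamed[key] = value
    return renamed


toy = {"mid.norm.weight": 0, "mid.q.weight": 1}
print(sorted(rename_keys(toy)))  # ['mid.group_norm.weight', 'mid.query.weight']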
78
0
import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse("3.8"): import importlib_metadata else: import importlib.metadata as importlib_metadata def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=False ): try: __magic_name__ : Union[str, Any] =os.environ[key] except KeyError: # KEY isn't set, default to `default`. __magic_name__ : List[str] =default else: # KEY is set, convert it to True or False. try: __magic_name__ : int =strtobool(lowerCamelCase ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F"If set, {key} must be yes or no." ) return _value UpperCAmelCase_ : Dict = parse_flag_from_env("RUN_SLOW", default=False) UpperCAmelCase_ : str = parse_flag_from_env("RUN_REMOTE", default=False) UpperCAmelCase_ : int = parse_flag_from_env("RUN_LOCAL", default=True) UpperCAmelCase_ : Any = parse_flag_from_env("RUN_PACKAGED", default=True) # Compression UpperCAmelCase_ : Any = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4") UpperCAmelCase_ : Tuple = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr") UpperCAmelCase_ : Optional[int] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard") # Audio UpperCAmelCase_ : List[Any] = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"), reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ", ) # Beam UpperCAmelCase_ : str = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"), reason="test requires apache-beam and a compatible dill version", ) # Dill-cloudpickle compatibility UpperCAmelCase_ : Optional[int] = pytest.mark.skipif( config.DILL_VERSION <= version.parse("0.3.2"), reason="test requires dill>0.3.2 for cloudpickle compatibility", ) # Windows UpperCAmelCase_ : Tuple = pytest.mark.skipif( sys.platform == "win32", reason="test should not be run on Windows", ) def lowerCAmelCase_ ( lowerCamelCase ): try: import faiss # noqa except ImportError: __magic_name__ : List[str] =unittest.skip("""test requires faiss""" )(lowerCamelCase ) return test_case def lowerCAmelCase_ ( lowerCamelCase ): try: import regex # noqa except ImportError: __magic_name__ : Union[str, Any] =unittest.skip("""test requires regex""" )(lowerCamelCase ) return test_case def lowerCAmelCase_ ( lowerCamelCase ): try: import elasticsearch # noqa except ImportError: __magic_name__ : Any =unittest.skip("""test requires elasticsearch""" )(lowerCamelCase ) return test_case def lowerCAmelCase_ ( lowerCamelCase ): try: import sqlalchemy # noqa except ImportError: __magic_name__ : Tuple =unittest.skip("""test requires sqlalchemy""" )(lowerCamelCase ) return test_case def lowerCAmelCase_ ( lowerCamelCase ): if not config.TORCH_AVAILABLE: __magic_name__ : List[Any] =unittest.skip("""test requires PyTorch""" )(lowerCamelCase ) return test_case def lowerCAmelCase_ ( lowerCamelCase ): if not config.TF_AVAILABLE: __magic_name__ : List[str] =unittest.skip("""test requires TensorFlow""" 
)(lowerCamelCase ) return test_case def lowerCAmelCase_ ( lowerCamelCase ): if not config.JAX_AVAILABLE: __magic_name__ : Union[str, Any] =unittest.skip("""test requires JAX""" )(lowerCamelCase ) return test_case def lowerCAmelCase_ ( lowerCamelCase ): if not config.PIL_AVAILABLE: __magic_name__ : Optional[Any] =unittest.skip("""test requires Pillow""" )(lowerCamelCase ) return test_case def lowerCAmelCase_ ( lowerCamelCase ): try: import transformers # noqa F401 except ImportError: return unittest.skip("""test requires transformers""" )(lowerCamelCase ) else: return test_case def lowerCAmelCase_ ( lowerCamelCase ): try: import tiktoken # noqa F401 except ImportError: return unittest.skip("""test requires tiktoken""" )(lowerCamelCase ) else: return test_case def lowerCAmelCase_ ( lowerCamelCase ): try: import spacy # noqa F401 except ImportError: return unittest.skip("""test requires spacy""" )(lowerCamelCase ) else: return test_case def lowerCAmelCase_ ( lowerCamelCase ): def _require_spacy_model(lowerCamelCase ): try: import spacy # noqa F401 spacy.load(lowerCamelCase ) except ImportError: return unittest.skip("""test requires spacy""" )(lowerCamelCase ) except OSError: return unittest.skip("""test requires spacy model '{}'""".format(lowerCamelCase ) )(lowerCamelCase ) else: return test_case return _require_spacy_model def lowerCAmelCase_ ( lowerCamelCase ): try: import pyspark # noqa F401 except ImportError: return unittest.skip("""test requires pyspark""" )(lowerCamelCase ) else: return test_case def lowerCAmelCase_ ( lowerCamelCase ): try: import joblibspark # noqa F401 except ImportError: return unittest.skip("""test requires joblibspark""" )(lowerCamelCase ) else: return test_case def lowerCAmelCase_ ( lowerCamelCase ): if not _run_slow_tests or _run_slow_tests == 0: __magic_name__ : List[str] =unittest.skip("""test is slow""" )(lowerCamelCase ) return test_case def lowerCAmelCase_ ( lowerCamelCase ): if not _run_local_tests or _run_local_tests == 0: __magic_name__ : Any =unittest.skip("""test is local""" )(lowerCamelCase ) return test_case def lowerCAmelCase_ ( lowerCamelCase ): if not _run_packaged_tests or _run_packaged_tests == 0: __magic_name__ : Dict =unittest.skip("""test is packaged""" )(lowerCamelCase ) return test_case def lowerCAmelCase_ ( lowerCamelCase ): if not _run_remote_tests or _run_remote_tests == 0: __magic_name__ : Optional[Any] =unittest.skip("""test requires remote""" )(lowerCamelCase ) return test_case def lowerCAmelCase_ ( *lowerCamelCase ): def decorate(cls ): for name, fn in cls.__dict__.items(): if callable(lowerCamelCase ) and name.startswith("""test""" ): for decorator in decorators: __magic_name__ : Optional[Any] =decorator(lowerCamelCase ) setattr(cls , lowerCamelCase , lowerCamelCase ) return cls return decorate class __A ( UpperCamelCase__ ): pass class __A ( UpperCamelCase__ ): UpperCamelCase = 0 UpperCamelCase = 1 UpperCamelCase = 2 @contextmanager def lowerCAmelCase_ ( lowerCamelCase=OfflineSimulationMode.CONNECTION_FAILS , lowerCamelCase=1E-16 ): __magic_name__ : Union[str, Any] =requests.Session().request def timeout_request(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ): # Change the url to an invalid url so that the connection hangs __magic_name__ : Tuple ="""https://10.255.255.1""" if kwargs.get("""timeout""" ) is None: raise RequestWouldHangIndefinitelyError( F"Tried a call to {url} in offline mode with no timeout set. Please set a timeout." 
) __magic_name__ : Tuple =timeout try: return online_request(lowerCamelCase , lowerCamelCase , **lowerCamelCase ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier __magic_name__ : Tuple =url __magic_name__ : List[str] =e.args[0] __magic_name__ : Optional[int] =(max_retry_error.args[0].replace("""10.255.255.1""" , F"OfflineMock[{url}]" ),) __magic_name__ : Union[str, Any] =(max_retry_error,) raise def raise_connection_error(lowerCamelCase , lowerCamelCase , **lowerCamelCase ): raise requests.ConnectionError("""Offline mode is enabled.""" , request=lowerCamelCase ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch("""requests.Session.send""" , lowerCamelCase ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch("""requests.Session.request""" , lowerCamelCase ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch("""datasets.config.HF_DATASETS_OFFLINE""" , lowerCamelCase ): yield else: raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" ) @contextmanager def lowerCAmelCase_ ( *lowerCamelCase , **lowerCamelCase ): __magic_name__ : Any =str(Path().resolve() ) with tempfile.TemporaryDirectory(*lowerCamelCase , **lowerCamelCase ) as tmp_dir: try: os.chdir(lowerCamelCase ) yield finally: os.chdir(lowerCamelCase ) @contextmanager def lowerCAmelCase_ ( ): import gc gc.collect() __magic_name__ : int =pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def lowerCAmelCase_ ( ): import gc gc.collect() __magic_name__ : List[str] =pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." 
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ): return deepcopy(lowerCamelCase ).integers(0 , 100 , 10 ).tolist() == deepcopy(lowerCamelCase ).integers(0 , 100 , 10 ).tolist() def lowerCAmelCase_ ( lowerCamelCase ): import decorator from requests.exceptions import HTTPError def _wrapper(lowerCamelCase , *lowerCamelCase , **lowerCamelCase ): try: return func(*lowerCamelCase , **lowerCamelCase ) except HTTPError as err: if str(lowerCamelCase ).startswith("""500""" ) or str(lowerCamelCase ).startswith("""502""" ): pytest.xfail(str(lowerCamelCase ) ) raise err return decorator.decorator(_wrapper , lowerCamelCase ) class __A : def __init__( self :Any , __snake_case :List[str] , __snake_case :List[Any] , __snake_case :str ): '''simple docstring''' __magic_name__ : Optional[Any] =returncode __magic_name__ : Any =stdout __magic_name__ : Any =stderr async def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ): while True: __magic_name__ : Any =await stream.readline() if line: callback(lowerCamelCase ) else: break async def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=False , lowerCamelCase=False ): if echo: print("""\nRunning: """ , """ """.join(lowerCamelCase ) ) __magic_name__ : Optional[int] =await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=lowerCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=lowerCamelCase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) __magic_name__ : Tuple =[] __magic_name__ : int =[] def tee(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase="" ): __magic_name__ : Tuple =line.decode("""utf-8""" ).rstrip() sink.append(lowerCamelCase ) if not quiet: print(lowerCamelCase , lowerCamelCase , file=lowerCamelCase ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda lowerCamelCase : tee(lowerCamelCase , lowerCamelCase , sys.stdout , label="""stdout:""" ) ), _read_stream(p.stderr , lambda lowerCamelCase : tee(lowerCamelCase , lowerCamelCase , sys.stderr , label="""stderr:""" ) ), ] , timeout=lowerCamelCase , ) return _RunOutput(await p.wait() , lowerCamelCase , lowerCamelCase ) def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=180 , lowerCamelCase=False , lowerCamelCase=True ): __magic_name__ : Tuple =asyncio.get_event_loop() __magic_name__ : Any =loop.run_until_complete( _stream_subprocess(lowerCamelCase , env=lowerCamelCase , stdin=lowerCamelCase , timeout=lowerCamelCase , quiet=lowerCamelCase , echo=lowerCamelCase ) ) __magic_name__ : Optional[int] =""" """.join(lowerCamelCase ) if result.returncode > 0: __magic_name__ : List[str] ="""\n""".join(result.stderr ) raise RuntimeError( F"'{cmd_str}' failed with returncode {result.returncode}\n\n" F"The combined stderr from workers follows:\n{stderr}" ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(F"'{cmd_str}' produced no output." ) return result def lowerCAmelCase_ ( ): __magic_name__ : Dict =os.environ.get("""PYTEST_XDIST_WORKER""" , """gw0""" ) __magic_name__ : str =re.sub(R"""^gw""" , """""" , lowerCamelCase , 0 , re.M ) return int(lowerCamelCase ) def lowerCAmelCase_ ( ): __magic_name__ : Optional[int] =29500 __magic_name__ : int =pytest_xdist_worker_id() return port + uniq_delta
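The skip helpers above all follow one pattern: parse a boolean flag from the environment, then wrap the test in unittest.skip when the flag is off. A stripped-down sketch of that pattern follows; the flag name RUN_SLOW comes from the file, while the simplified parser and sample test are illustrative.

import os
import unittest


def parse_flag_from_env(key: str, default: bool = False) -> bool:
    # Unset means "use the default"; otherwise accept common truthy strings.
    value = os.environ.get(key)
    if value is None:
        return default
    return value.lower() in ("1", "true", "yes", "y")


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)


def slow(test_case):
    # Decorator: skip the test unless the RUN_SLOW environment flag is set.
    if not _run_slow_tests:
        return unittest.skip("test is slow")(test_case)
    return test_case


@slow
def test_expensive_download():
    pass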
21
'''simple docstring''' import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def lowerCAmelCase_ ( snake_case_ : ndarray ) -> float: '''simple docstring''' return np.dot(snake_case_ , snake_case_ ) class __A : def __init__(self : int , *, __a : float = np.inf , __a : str = "linear" , __a : float = 0.0 , ): UpperCAmelCase_ = regularization UpperCAmelCase_ = gamma if kernel == "linear": UpperCAmelCase_ = self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError("rbf kernel requires gamma" ) if not isinstance(self.gamma , (float, int) ): raise ValueError("gamma must be float or int" ) if not self.gamma > 0: raise ValueError("gamma must be > 0" ) UpperCAmelCase_ = self.__rbf # in the future, there could be a default value like in sklearn # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: UpperCAmelCase_ = f"""Unknown kernel: {kernel}""" raise ValueError(__a ) def _lowercase (self : Optional[int] , __a : ndarray , __a : ndarray ): return np.dot(__a , __a ) def _lowercase (self : Optional[int] , __a : ndarray , __a : ndarray ): return np.exp(-(self.gamma * norm_squared(vectora - vectora )) ) def _lowercase (self : str , __a : list[ndarray] , __a : ndarray ): UpperCAmelCase_ = observations UpperCAmelCase_ = classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations ((UpperCAmelCase_) , ) = np.shape(__a ) def to_minimize(__a : ndarray ) -> float: UpperCAmelCase_ = 0 ((UpperCAmelCase_) , ) = np.shape(__a ) for i in range(__a ): for j in range(__a ): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i] , observations[j] ) ) return 1 / 2 * s - sum(__a ) UpperCAmelCase_ = LinearConstraint(__a , 0 , 0 ) UpperCAmelCase_ = Bounds(0 , self.regularization ) UpperCAmelCase_ = minimize( __a , np.ones(__a ) , bounds=__a , constraints=[ly_contraint] ).x UpperCAmelCase_ = l_star # calculating mean offset of separation plane to points UpperCAmelCase_ = 0 for i in range(__a ): for j in range(__a ): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i] , observations[j] ) UpperCAmelCase_ = s / n def _lowercase (self : Optional[int] , __a : ndarray ): UpperCAmelCase_ = sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n] , __a ) for n in range(len(self.classes ) ) ) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
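As a quick numeric check of the two kernels defined above, evaluated on arbitrary concrete vectors (gamma chosen as 0.5 for the RBF case):

import numpy as np

a = np.array([1.0, 0.0])
b = np.array([0.0, 1.0])

linear = np.dot(a, b)                       # 0.0 for orthogonal vectors
rbf = np.exp(-0.5 * np.dot(a - b, a - b))   # gamma = 0.5, so exp(-1) ~ 0.3679
print(linear, round(float(rbf), 4))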
78
0
'''simple docstring''' import functools import logging import os import sys import threading from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional import huggingface_hub.utils as hf_hub_utils from tqdm import auto as tqdm_lib _snake_case : Union[str, Any] = threading.Lock() _snake_case : Optional[logging.Handler] = None _snake_case : Dict = { 'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL, } _snake_case : int = logging.WARNING _snake_case : Tuple = True def snake_case_ (): '''simple docstring''' _a = os.getenv('''TRANSFORMERS_VERBOSITY''' , UpperCamelCase ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f'Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, ' f'has to be one of: { ", ".join(log_levels.keys() ) }' ) return _default_log_level def snake_case_ (): '''simple docstring''' return __name__.split('''.''' )[0] def snake_case_ (): '''simple docstring''' return logging.getLogger(_get_library_name() ) def snake_case_ (): '''simple docstring''' global _default_handler with _lock: if _default_handler: # This library has already configured the library root logger. return _a = logging.StreamHandler() # Set sys.stderr as stream. _a = sys.stderr.flush # Apply our default configuration to the library root logger. _a = _get_library_root_logger() library_root_logger.addHandler(_default_handler ) library_root_logger.setLevel(_get_default_logging_level() ) _a = False def snake_case_ (): '''simple docstring''' global _default_handler with _lock: if not _default_handler: return _a = _get_library_root_logger() library_root_logger.removeHandler(_default_handler ) library_root_logger.setLevel(logging.NOTSET ) _a = None def snake_case_ (): '''simple docstring''' return log_levels def snake_case_ (UpperCamelCase : Optional[str] = None ): '''simple docstring''' if name is None: _a = _get_library_name() _configure_library_root_logger() return logging.getLogger(UpperCamelCase ) def snake_case_ (): '''simple docstring''' _configure_library_root_logger() return _get_library_root_logger().getEffectiveLevel() def snake_case_ (UpperCamelCase : int ): '''simple docstring''' _configure_library_root_logger() _get_library_root_logger().setLevel(UpperCamelCase ) def snake_case_ (): '''simple docstring''' return set_verbosity(UpperCamelCase ) def snake_case_ (): '''simple docstring''' return set_verbosity(UpperCamelCase ) def snake_case_ (): '''simple docstring''' return set_verbosity(UpperCamelCase ) def snake_case_ (): '''simple docstring''' return set_verbosity(UpperCamelCase ) def snake_case_ (): '''simple docstring''' _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().removeHandler(_default_handler ) def snake_case_ (): '''simple docstring''' _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().addHandler(_default_handler ) def snake_case_ (UpperCamelCase : logging.Handler ): '''simple docstring''' _configure_library_root_logger() assert handler is not None _get_library_root_logger().addHandler(UpperCamelCase ) def snake_case_ (UpperCamelCase : logging.Handler ): '''simple docstring''' _configure_library_root_logger() assert handler is not None and handler not in _get_library_root_logger().handlers _get_library_root_logger().removeHandler(UpperCamelCase ) def 
snake_case_ (): '''simple docstring''' _configure_library_root_logger() _a = False def snake_case_ (): '''simple docstring''' _configure_library_root_logger() _a = True def snake_case_ (): '''simple docstring''' _a = _get_library_root_logger().handlers for handler in handlers: _a = logging.Formatter('''[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s''' ) handler.setFormatter(UpperCamelCase ) def snake_case_ (): '''simple docstring''' _a = _get_library_root_logger().handlers for handler in handlers: handler.setFormatter(UpperCamelCase ) def snake_case_ (self : List[str] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Optional[int] ): '''simple docstring''' _a = os.getenv('''TRANSFORMERS_NO_ADVISORY_WARNINGS''' , UpperCamelCase ) if no_advisory_warnings: return self.warning(*UpperCamelCase , **UpperCamelCase ) _snake_case : Any = warning_advice @functools.lru_cache(UpperCamelCase ) def snake_case_ (self : List[str] , *UpperCamelCase : List[Any] , **UpperCamelCase : Union[str, Any] ): '''simple docstring''' self.warning(*UpperCamelCase , **UpperCamelCase ) _snake_case : Optional[int] = warning_once class A : def __init__( self : Optional[Any] , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : Optional[Any] ) -> int: # pylint: disable=unused-argument """simple docstring""" _a = args[0] if args else None def __iter__( self : str ) -> Any: """simple docstring""" return iter(self._iterator ) def __getattr__( self : List[str] , lowerCAmelCase_ : str ) -> str: """simple docstring""" def empty_fn(*lowerCAmelCase_ : Optional[Any] , **lowerCAmelCase_ : List[str] ): # pylint: disable=unused-argument return return empty_fn def __enter__( self : List[Any] ) -> int: """simple docstring""" return self def __exit__( self : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any ) -> Tuple: """simple docstring""" return class A : def __call__( self : Optional[Any] , *lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : str ) -> List[Any]: """simple docstring""" if _tqdm_active: return tqdm_lib.tqdm(*lowerCAmelCase_ , **lowerCAmelCase_ ) else: return EmptyTqdm(*lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : str , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : Optional[Any] ) -> Any: """simple docstring""" _a = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Dict ) -> str: """simple docstring""" if _tqdm_active: return tqdm_lib.tqdm.get_lock() _snake_case : Any = _tqdm_cls() def snake_case_ (): '''simple docstring''' global _tqdm_active return bool(_tqdm_active ) def snake_case_ (): '''simple docstring''' global _tqdm_active _a = True hf_hub_utils.enable_progress_bars() def snake_case_ (): '''simple docstring''' global _tqdm_active _a = False hf_hub_utils.disable_progress_bars()
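The module above routes all verbosity control through a single library root logger with one stderr handler; here is a minimal sketch of that idea for a hypothetical package named mypkg.

import logging
import sys

_root = logging.getLogger("mypkg")  # hypothetical library root logger


def _ensure_handler() -> None:
    # Attach a single stderr handler exactly once, like _configure_library_root_logger above.
    if not _root.handlers:
        _root.addHandler(logging.StreamHandler(sys.stderr))


def set_verbosity(level: int) -> None:
    _ensure_handler()
    _root.setLevel(level)


set_verbosity(logging.INFO)
logging.getLogger("mypkg.sub").info("visible at INFO and below")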
22
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import FeatureExtractionMixin from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType, logging SCREAMING_SNAKE_CASE_: Optional[Any] =logging.get_logger(__name__) SCREAMING_SNAKE_CASE_: List[Any] ={ 'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json', # See all Perceiver models at https://huggingface.co/models?filter=perceiver } class __A ( UpperCamelCase__ ): a__ : List[Any] = """perceiver""" def __init__(self : Optional[int] , __a : Tuple=256 , __a : Optional[Any]=1280 , __a : Optional[int]=768 , __a : Any=1 , __a : List[str]=26 , __a : Dict=8 , __a : List[Any]=8 , __a : Tuple=None , __a : List[str]=None , __a : Optional[int]="kv" , __a : Union[str, Any]=1 , __a : List[str]=1 , __a : List[Any]="gelu" , __a : List[str]=0.1 , __a : str=0.02 , __a : List[str]=1E-12 , __a : Optional[int]=True , __a : Tuple=262 , __a : Dict=2048 , __a : int=56 , __a : Optional[int]=[368, 496] , __a : Any=16 , __a : Optional[Any]=1920 , __a : Any=16 , __a : str=[1, 16, 224, 224] , **__a : Any , ): super().__init__(**__a ) UpperCAmelCase_ = num_latents UpperCAmelCase_ = d_latents UpperCAmelCase_ = d_model UpperCAmelCase_ = num_blocks UpperCAmelCase_ = num_self_attends_per_block UpperCAmelCase_ = num_self_attention_heads UpperCAmelCase_ = num_cross_attention_heads UpperCAmelCase_ = qk_channels UpperCAmelCase_ = v_channels UpperCAmelCase_ = cross_attention_shape_for_attention UpperCAmelCase_ = self_attention_widening_factor UpperCAmelCase_ = cross_attention_widening_factor UpperCAmelCase_ = hidden_act UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = initializer_range UpperCAmelCase_ = layer_norm_eps UpperCAmelCase_ = use_query_residual # masked language modeling attributes UpperCAmelCase_ = vocab_size UpperCAmelCase_ = max_position_embeddings # image classification attributes UpperCAmelCase_ = image_size # flow attributes UpperCAmelCase_ = train_size # multimodal autoencoding attributes UpperCAmelCase_ = num_frames UpperCAmelCase_ = audio_samples_per_frame UpperCAmelCase_ = samples_per_patch UpperCAmelCase_ = output_shape class __A ( UpperCamelCase__ ): @property def _lowercase (self : Dict ): if self.task == "multiple-choice": UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"} else: UpperCAmelCase_ = {0: "batch", 1: "sequence"} return OrderedDict( [ ("inputs", dynamic_axis), ("attention_mask", dynamic_axis), ] ) @property def _lowercase (self : Optional[Any] ): return 1E-4 def _lowercase (self : Union[str, Any] , __a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __a : int = -1 , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional[TensorType] = None , __a : int = 3 , __a : int = 40 , __a : int = 40 , ): # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified if isinstance(__a , __a ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX UpperCAmelCase_ = compute_effective_axis_dimension( __a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX UpperCAmelCase_ = 
preprocessor.num_special_tokens_to_add(__a ) UpperCAmelCase_ = compute_effective_axis_dimension( __a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__a ) # Generate dummy inputs according to compute batch and sequence UpperCAmelCase_ = [" ".join(["a"] ) * seq_length] * batch_size UpperCAmelCase_ = dict(preprocessor(__a , return_tensors=__a ) ) UpperCAmelCase_ = inputs.pop("input_ids" ) return inputs elif isinstance(__a , __a ) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX UpperCAmelCase_ = compute_effective_axis_dimension(__a , fixed_dimension=OnnxConfig.default_fixed_batch ) UpperCAmelCase_ = self._generate_dummy_images(__a , __a , __a , __a ) UpperCAmelCase_ = dict(preprocessor(images=__a , return_tensors=__a ) ) UpperCAmelCase_ = inputs.pop("pixel_values" ) return inputs else: raise ValueError( "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
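The dummy-input generation above first replaces dynamic (-1) axes with small fixed fallbacks before building text or image batches; the snippet below is an illustrative simplification of that axis resolution. resolve_axis and the fallback table are stand-ins, not the actual transformers utility.

ONNX_DEFAULTS = {"batch": 2, "sequence": 8}  # small fixed sizes used for dynamic axes


def resolve_axis(requested: int, fallback: int) -> int:
    # -1 marks a dynamic axis: export with a small fixed size instead.
    return fallback if requested == -1 else requested


batch_size = resolve_axis(-1, ONNX_DEFAULTS["batch"])     # dynamic batch -> 2
seq_length = resolve_axis(40, ONNX_DEFAULTS["sequence"])  # explicit length kept as-is
dummy_text = [" ".join(["a"] * seq_length)] * batch_size
print(batch_size, seq_length, len(dummy_text))            # 2 40 2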
78
0
import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") snake_case__ : List[str] = logging.getLogger(__name__) @dataclass class _a : """simple docstring""" A_ = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) A_ = field( default=UpperCAmelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) A_ = field( default=UpperCAmelCase__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) A_ = field( default=UpperCAmelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) A_ = field( default=UpperCAmelCase__ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) A_ = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) A_ = field( default=UpperCAmelCase__ , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) @dataclass class _a : """simple docstring""" A_ = field(default=UpperCAmelCase__ , metadata={"""help""": """The input training data file (a text file)."""} ) A_ = field( default=UpperCAmelCase__ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , ) A_ = field( default=UpperCAmelCase__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) A_ = field( default=UpperCAmelCase__ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , ) A_ = field( default=UpperCAmelCase__ , metadata={ """help""": ( """The maximum total input sequence length after tokenization. If passed, sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) A_ = field( default=UpperCAmelCase__ , metadata={ """help""": ( """Whether to pad all samples to the maximum sentence length. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch. More """ """efficient on GPU but very bad for TPU.""" ) } , ) A_ = field( default=UpperCAmelCase__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) A_ = field( default=UpperCAmelCase__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) def _UpperCAmelCase ( self ) -> Union[str, Any]: if self.train_file is not None: UpperCamelCase_ = self.train_file.split('.' 
)[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: UpperCamelCase_ = self.validation_file.split('.' )[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class _a : """simple docstring""" A_ = 42 A_ = True A_ = None A_ = None def __call__( self , _UpperCAmelCase ) -> List[Any]: UpperCamelCase_ = 'label' if 'label' in features[0].keys() else 'labels' UpperCamelCase_ = [feature.pop(_UpperCAmelCase ) for feature in features] UpperCamelCase_ = len(_UpperCAmelCase ) UpperCamelCase_ = len(features[0]['input_ids'] ) UpperCamelCase_ = [ [{k: v[i] for k, v in feature.items()} for i in range(_UpperCAmelCase )] for feature in features ] UpperCamelCase_ = list(chain(*_UpperCAmelCase ) ) UpperCamelCase_ = self.tokenizer.pad( _UpperCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , ) # Un-flatten UpperCamelCase_ = {k: v.view(_UpperCAmelCase , _UpperCAmelCase , -1 ) for k, v in batch.items()} # Add back labels UpperCamelCase_ = torch.tensor(_UpperCAmelCase , dtype=torch.intaa ) return batch def _snake_case (): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. UpperCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith('.json'): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_swag' , __lowercase , __lowercase) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout)] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() UpperCamelCase_ = training_args.get_process_log_level() logger.setLevel(__lowercase) datasets.utils.logging.set_verbosity(__lowercase) transformers.utils.logging.set_verbosity(__lowercase) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}""") logger.info(f"""Training/evaluation parameters {training_args}""") # Detecting last checkpoint. 
UpperCamelCase_ = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: UpperCamelCase_ = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ 'Use --overwrite_output_dir to overcome.') elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.') # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: UpperCamelCase_ = {} if data_args.train_file is not None: UpperCamelCase_ = data_args.train_file if data_args.validation_file is not None: UpperCamelCase_ = data_args.validation_file UpperCamelCase_ = data_args.train_file.split('.')[-1] UpperCamelCase_ = load_dataset( __lowercase , data_files=__lowercase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: # Downloading and loading the swag dataset from the hub. UpperCamelCase_ = load_dataset( 'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. UpperCamelCase_ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) UpperCamelCase_ = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) UpperCamelCase_ = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # When using your own dataset or a different dataset from swag, you will probably need to change this. 
UpperCamelCase_ = [f"""ending{i}""" for i in range(4)] UpperCamelCase_ = 'sent1' UpperCamelCase_ = 'sent2' if data_args.max_seq_length is None: UpperCamelCase_ = tokenizer.model_max_length if max_seq_length > 1024: logger.warning( 'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value' ' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can' ' override this default with `--block_size xxx`.') UpperCamelCase_ = 1024 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the""" f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""") UpperCamelCase_ = min(data_args.max_seq_length , tokenizer.model_max_length) # Preprocessing the datasets. def preprocess_function(__lowercase): UpperCamelCase_ = [[context] * 4 for context in examples[context_name]] UpperCamelCase_ = examples[question_header_name] UpperCamelCase_ = [ [f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(__lowercase) ] # Flatten out UpperCamelCase_ = list(chain(*__lowercase)) UpperCamelCase_ = list(chain(*__lowercase)) # Tokenize UpperCamelCase_ = tokenizer( __lowercase , __lowercase , truncation=__lowercase , max_length=__lowercase , padding='max_length' if data_args.pad_to_max_length else False , ) # Un-flatten return {k: [v[i : i + 4] for i in range(0 , len(__lowercase) , 4)] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError('--do_train requires a train dataset') UpperCamelCase_ = raw_datasets['train'] if data_args.max_train_samples is not None: UpperCamelCase_ = min(len(__lowercase) , data_args.max_train_samples) UpperCamelCase_ = train_dataset.select(range(__lowercase)) with training_args.main_process_first(desc='train dataset map pre-processing'): UpperCamelCase_ = train_dataset.map( __lowercase , batched=__lowercase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError('--do_eval requires a validation dataset') UpperCamelCase_ = raw_datasets['validation'] if data_args.max_eval_samples is not None: UpperCamelCase_ = min(len(__lowercase) , data_args.max_eval_samples) UpperCamelCase_ = eval_dataset.select(range(__lowercase)) with training_args.main_process_first(desc='validation dataset map pre-processing'): UpperCamelCase_ = eval_dataset.map( __lowercase , batched=__lowercase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) # Data collator UpperCamelCase_ = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=__lowercase , pad_to_multiple_of=8 if training_args.fpaa else None) ) # Metric def compute_metrics(__lowercase): UpperCamelCase_ , UpperCamelCase_ = eval_predictions UpperCamelCase_ = np.argmax(__lowercase , axis=1) return {"accuracy": (preds == label_ids).astype(np.floataa).mean().item()} # Initialize our Trainer UpperCamelCase_ = Trainer( model=__lowercase , args=__lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , compute_metrics=__lowercase , ) # Training if training_args.do_train: UpperCamelCase_ = 
None if training_args.resume_from_checkpoint is not None: UpperCamelCase_ = training_args.resume_from_checkpoint elif last_checkpoint is not None: UpperCamelCase_ = last_checkpoint UpperCamelCase_ = trainer.train(resume_from_checkpoint=__lowercase) trainer.save_model() # Saves the tokenizer too for easy upload UpperCamelCase_ = train_result.metrics UpperCamelCase_ = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(__lowercase) ) UpperCamelCase_ = min(__lowercase , len(__lowercase)) trainer.log_metrics('train' , __lowercase) trainer.save_metrics('train' , __lowercase) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('*** Evaluate ***') UpperCamelCase_ = trainer.evaluate() UpperCamelCase_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__lowercase) UpperCamelCase_ = min(__lowercase , len(__lowercase)) trainer.log_metrics('eval' , __lowercase) trainer.save_metrics('eval' , __lowercase) UpperCamelCase_ = { 'finetuned_from': model_args.model_name_or_path, 'tasks': 'multiple-choice', 'dataset_tags': 'swag', 'dataset_args': 'regular', 'dataset': 'SWAG', 'language': 'en', } if training_args.push_to_hub: trainer.push_to_hub(**__lowercase) else: trainer.create_model_card(**__lowercase) def _snake_case (__lowercase): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
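The multiple-choice collator in this script flattens batch-times-choices features into one list for padding and then reshapes them back; the same round trip in plain Python, with toy features and no tokenizer:

from itertools import chain

batch = [  # two examples, each with three tokenized candidate endings
    [[1, 2], [3, 4], [5, 6]],
    [[7, 8], [9, 10], [11, 12]],
]
num_choices = len(batch[0])

flat = list(chain(*batch))  # six sequences, padded together as one batch
unflat = [flat[i : i + num_choices] for i in range(0, len(flat), num_choices)]
assert unflat == batch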
23
'''simple docstring'''
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    '''simple docstring'''
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 2_00:
        error_message = (
            "Request to slack returned an error "
            f"""{response.status_code}, the response is:\n{response.text}"""
        )
        raise ValueError(error_message)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
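One practical hardening left out above is a request timeout so the POST cannot hang indefinitely; a hedged variant follows (the 10-second value is arbitrary).

import requests


def send_slack_message_with_timeout(message_body: str, slack_url: str) -> None:
    # Same POST as above, but fail fast instead of blocking forever.
    response = requests.post(
        slack_url,
        json={"text": message_body},
        headers={"Content-Type": "application/json"},
        timeout=10,
    )
    response.raise_for_status()  # raises requests.HTTPError on any non-2xx status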
78
0
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class lowerCAmelCase ( unittest.TestCase): def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=18 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=400 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , ) -> Tuple: '''simple docstring''' __snake_case = size if size is not None else {'''shortest_edge''': 20} __snake_case = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __snake_case = parent __snake_case = batch_size __snake_case = num_channels __snake_case = image_size __snake_case = min_resolution __snake_case = max_resolution __snake_case = do_resize __snake_case = size __snake_case = do_center_crop __snake_case = crop_size __snake_case = do_flip_channel_order def lowerCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase): __lowercase : Union[str, Any] = MobileViTImageProcessor if is_vision_available() else None def lowerCAmelCase ( self ) -> str: '''simple docstring''' __snake_case = MobileViTImageProcessingTester(self ) @property def lowerCAmelCase ( self ) -> Optional[int]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' __snake_case = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_center_crop''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''center_crop''' ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_flip_channel_order''' ) ) def lowerCAmelCase ( self ) -> Optional[int]: '''simple docstring''' __snake_case = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) __snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def lowerCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass def lowerCAmelCase ( self ) -> str: '''simple docstring''' __snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input __snake_case = 
image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def lowerCAmelCase ( self ) -> Tuple: '''simple docstring''' __snake_case = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input __snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def lowerCAmelCase ( self ) -> Tuple: '''simple docstring''' __snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input __snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
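All three shape assertions above come down to resize-then-center-crop; here is a small numpy check of just the crop step. The 18x18 size matches the tester defaults, while the input size is arbitrary.

import numpy as np


def center_crop(img: np.ndarray, size: int) -> np.ndarray:
    # img has shape (channels, height, width); crop a size x size window around the center.
    _, h, w = img.shape
    top, left = (h - size) // 2, (w - size) // 2
    return img[:, top : top + size, left : left + size]


img = np.zeros((3, 30, 40))
print(center_crop(img, 18).shape)  # (3, 18, 18)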
24
'''simple docstring''' from typing import Callable, List, Optional, Union import PIL import torch from transformers import ( CLIPImageProcessor, CLIPSegForImageSegmentation, CLIPSegProcessor, CLIPTextModel, CLIPTokenizer, ) from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import deprecate, is_accelerate_available, logging SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__) # pylint: disable=invalid-name class __A ( UpperCamelCase__ ): def __init__(self : Any , __a : CLIPSegForImageSegmentation , __a : CLIPSegProcessor , __a : AutoencoderKL , __a : CLIPTextModel , __a : CLIPTokenizer , __a : UNetaDConditionModel , __a : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __a : StableDiffusionSafetyChecker , __a : CLIPImageProcessor , ): super().__init__() if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1: UpperCAmelCase_ = ( f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`""" f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """ "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1" , "1.0.0" , __a , standard_warn=__a ) UpperCAmelCase_ = dict(scheduler.config ) UpperCAmelCase_ = 1 UpperCAmelCase_ = FrozenDict(__a ) if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False: UpperCAmelCase_ = ( f"""The configuration file of this scheduler: {scheduler} has not set the configuration""" " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make" " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to" " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face" " Hub, it would be very nice if you could open a Pull request for the" " `scheduler/scheduler_config.json` file" ) deprecate("skip_prk_steps not set" , "1.0.0" , __a , standard_warn=__a ) UpperCAmelCase_ = dict(scheduler.config ) UpperCAmelCase_ = True UpperCAmelCase_ = FrozenDict(__a ) if safety_checker is None: logger.warning( f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure""" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
) self.register_modules( segmentation_model=__a , segmentation_processor=__a , vae=__a , text_encoder=__a , tokenizer=__a , unet=__a , scheduler=__a , safety_checker=__a , feature_extractor=__a , ) def _lowercase (self : str , __a : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCAmelCase_ = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__a ) def _lowercase (self : int ): self.enable_attention_slicing(__a ) def _lowercase (self : Optional[Any] ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) UpperCAmelCase_ = torch.device("cuda" ) for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]: if cpu_offloaded_model is not None: cpu_offload(__a , __a ) @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def _lowercase (self : Optional[int] ): if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(__a , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() def __call__(self : Dict , __a : Union[str, List[str]] , __a : Union[torch.FloatTensor, PIL.Image.Image] , __a : str , __a : int = 512 , __a : int = 512 , __a : int = 50 , __a : float = 7.5 , __a : Optional[Union[str, List[str]]] = None , __a : Optional[int] = 1 , __a : float = 0.0 , __a : Optional[torch.Generator] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[str] = "pil" , __a : bool = True , __a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __a : int = 1 , **__a : int , ): UpperCAmelCase_ = self.segmentation_processor( text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device ) UpperCAmelCase_ = self.segmentation_model(**__a ) UpperCAmelCase_ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy() UpperCAmelCase_ = self.numpy_to_pil(__a )[0].resize(image.size ) # Run inpainting pipeline with the generated mask UpperCAmelCase_ = StableDiffusionInpaintPipeline( vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , ) return inpainting_pipeline( prompt=__a , image=__a , mask_image=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , )
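The mask construction in __call__ is essentially a sigmoid over segmentation logits followed by rescaling to the image size; below is a dependency-light sketch with numpy and PIL, using random logits as a stand-in for the CLIPSeg output.

import numpy as np
from PIL import Image

logits = np.random.randn(352, 352)                      # stand-in for CLIPSeg logits
probs = 1.0 / (1.0 + np.exp(-logits))                   # sigmoid, values in (0, 1)
mask = Image.fromarray((probs * 255).astype(np.uint8))  # single-channel mask image
mask = mask.resize((512, 512))                          # match the target image size
print(mask.size, mask.mode)                             # (512, 512) L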
78
0
from pathlib import Path from typing import List from transformers import is_torch_available, is_vision_available from transformers.testing_utils import get_tests_dir, is_tool_test from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText if is_torch_available(): import torch if is_vision_available(): from PIL import Image a_ = ['text', 'image', 'audio'] def lowerCamelCase__ ( _a): SCREAMING_SNAKE_CASE : List[str] = [] for input_type in input_types: if input_type == "text": inputs.append("Text input") elif input_type == "image": inputs.append( Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))) elif input_type == "audio": inputs.append(torch.ones(3000)) elif isinstance(_a , _a): inputs.append(create_inputs(_a)) else: raise ValueError(f"Invalid type requested: {input_type}") return inputs def lowerCamelCase__ ( _a): SCREAMING_SNAKE_CASE : List[Any] = [] for output in outputs: if isinstance(_a , (str, AgentText)): output_types.append("text") elif isinstance(_a , (Image.Image, AgentImage)): output_types.append("image") elif isinstance(_a , (torch.Tensor, AgentAudio)): output_types.append("audio") else: raise ValueError(f"Invalid output: {output}") return output_types @is_tool_test class _UpperCamelCase : '''simple docstring''' def __UpperCamelCase ( self : Dict ) -> List[Any]: """simple docstring""" self.assertTrue(hasattr(self.tool , "inputs" ) ) self.assertTrue(hasattr(self.tool , "outputs" ) ) SCREAMING_SNAKE_CASE : List[str] = self.tool.inputs for _input in inputs: if isinstance(_input , a ): for __input in _input: self.assertTrue(__input in authorized_types ) else: self.assertTrue(_input in authorized_types ) SCREAMING_SNAKE_CASE : Optional[Any] = self.tool.outputs for _output in outputs: self.assertTrue(_output in authorized_types ) def __UpperCamelCase ( self : List[str] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = create_inputs(self.tool.inputs ) SCREAMING_SNAKE_CASE : Dict = self.tool(*a ) # There is a single output if len(self.tool.outputs ) == 1: SCREAMING_SNAKE_CASE : Union[str, Any] = [outputs] self.assertListEqual(output_types(a ) , self.tool.outputs ) def __UpperCamelCase ( self : List[str] ) -> int: """simple docstring""" self.assertTrue(hasattr(self.tool , "description" ) ) self.assertTrue(hasattr(self.tool , "default_checkpoint" ) ) self.assertTrue(self.tool.description.startswith("This is a tool that" ) ) def __UpperCamelCase ( self : str ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = create_inputs(self.tool.inputs ) SCREAMING_SNAKE_CASE : Any = self.tool(*a ) if not isinstance(a , a ): SCREAMING_SNAKE_CASE : Dict = [outputs] self.assertEqual(len(a ) , len(self.tool.outputs ) ) for output, output_type in zip(a , self.tool.outputs ): SCREAMING_SNAKE_CASE : Any = AGENT_TYPE_MAPPING[output_type] self.assertTrue(isinstance(a , a ) ) def __UpperCamelCase ( self : List[str] ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = create_inputs(self.tool.inputs ) SCREAMING_SNAKE_CASE : List[Any] = [] for _input, input_type in zip(a , self.tool.inputs ): if isinstance(a , a ): _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] ) else: _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) ) # Should not raise an error SCREAMING_SNAKE_CASE : Tuple = self.tool(*a ) if not isinstance(a , a ): SCREAMING_SNAKE_CASE : List[str] = [outputs] self.assertEqual(len(a ) , len(self.tool.outputs ) )
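The output_types helper above is plain isinstance dispatch over modalities; the same idea reduced to built-in types (the bytes-to-audio mapping is only a placeholder):

def output_types(outputs):
    # Map each value to a coarse modality label, mirroring the checks above.
    types = []
    for out in outputs:
        if isinstance(out, str):
            types.append("text")
        elif isinstance(out, (bytes, bytearray)):
            types.append("audio")  # placeholder: binary payload stands in for audio
        else:
            raise ValueError(f"Invalid output: {out!r}")
    return types


print(output_types(["hello", b"\x00\x01"]))  # ['text', 'audio']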
25
def is_power_of_two(number: int) -> bool:
    """Return True if `number` is a power of two, else False.

    >>> is_power_of_two(0)
    False
    >>> is_power_of_two(1)
    True
    >>> is_power_of_two(6)
    False
    >>> is_power_of_two(64)
    True
    """
    if number < 0:
        raise ValueError("number must not be negative")
    # A power of two has exactly one bit set, so n & (n - 1) clears it to 0.
    # The explicit `number > 0` guard keeps 0 from being misreported as a power of two.
    return number > 0 and number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
78
0
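A quick, hand-checkable usage sketch for the power-of-two predicate above (names follow the restored definition; with the guard in place, 0 is reported as not a power of two):

for n in (0, 1, 2, 3, 64, 96):
    print(n, is_power_of_two(n))
# prints: 0 False, 1 True, 2 True, 3 False, 64 True, 96 False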
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    """Configuration class for a generic encoder-decoder model: it stores one
    sub-config for the encoder and one for the decoder."""

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
26
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves needed so that every node holds
    exactly one coin, where a move transfers one coin along one edge."""
    if root is None:
        return 0

    # Validation: the tree must hold exactly one coin per node in total.
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation: each subtree reports (moves so far, coin excess).
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves + right_distrib_moves + abs(coins_to_left) + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
78
0
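A worked example for the coin-distribution routine above, using the restored names; the expected answer is hand-checked: the left child passes its two surplus coins up (2 moves) and the root forwards one coin to the right child (1 move).

# Tree with 3 nodes and 3 coins: root holds 0, left child 3, right child 0.
root = TreeNode(0, TreeNode(3), TreeNode(0))
print(distribute_coins(root))  # -> 3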
import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __A : Union[str, Any] = logging.get_logger(__name__) class lowerCamelCase( __snake_case ): '''simple docstring''' __magic_name__ = ['input_features', 'is_longer'] def __init__( self , snake_case_=64 , snake_case_=4_8000 , snake_case_=480 , snake_case_=10 , snake_case_=1024 , snake_case_=0.0 , snake_case_=False , snake_case_ = 0 , snake_case_ = 1_4000 , snake_case_ = None , snake_case_ = "fusion" , snake_case_ = "repeatpad" , **snake_case_ , ): super().__init__( feature_size=snake_case_ , sampling_rate=snake_case_ , padding_value=snake_case_ , return_attention_mask=snake_case_ , **snake_case_ , ) _A = top_db _A = truncation _A = padding _A = fft_window_size _A = (fft_window_size >> 1) + 1 _A = hop_length _A = max_length_s _A = max_length_s * sampling_rate _A = sampling_rate _A = frequency_min _A = frequency_max _A = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case_ , min_frequency=snake_case_ , max_frequency=snake_case_ , sampling_rate=snake_case_ , norm=snake_case_ , mel_scale='htk' , ) _A = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case_ , min_frequency=snake_case_ , max_frequency=snake_case_ , sampling_rate=snake_case_ , norm='slaney' , mel_scale='slaney' , ) def lowerCAmelCase__ ( self ): _A = copy.deepcopy(self.__dict__ ) _A = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def lowerCAmelCase__ ( self , snake_case_ , snake_case_ = None ): _A = spectrogram( snake_case_ , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=snake_case_ , log_mel='dB' , ) return log_mel_spectrogram.T def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ ): _A = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk _A = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk _A = [0] # randomly choose index for each part _A = np.random.choice(ranges[0] ) _A = np.random.choice(ranges[1] ) _A = np.random.choice(ranges[2] ) _A = mel[idx_front : idx_front + chunk_frames, :] _A = mel[idx_middle : idx_middle + chunk_frames, :] _A = mel[idx_back : idx_back + chunk_frames, :] _A = torch.tensor(mel[None, None, :] ) _A = torch.nn.functional.interpolate( snake_case_ , size=[chunk_frames, 64] , mode='bilinear' , align_corners=snake_case_ ) _A = mel_shrink[0][0].numpy() _A = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): if waveform.shape[0] > max_length: if truncation == "rand_trunc": _A = True # random crop to max_length (for compatibility) -> this should be handled by self.pad _A = len(snake_case_ ) - max_length _A = np.random.randint(0 , overflow + 1 ) _A = waveform[idx : idx + max_length] _A = self._np_extract_fbank_features(snake_case_ , self.mel_filters_slaney )[None, :] elif truncation == "fusion": _A = 
self._np_extract_fbank_features(snake_case_ , self.mel_filters ) _A = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed _A = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. _A = np.stack([mel, mel, mel, mel] , axis=0 ) _A = False else: _A = self._random_mel_fusion(snake_case_ , snake_case_ , snake_case_ ) _A = True else: raise NotImplementedError(F"data_truncating {truncation} not implemented" ) else: _A = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": _A = int(max_length / len(snake_case_ ) ) _A = np.stack(np.tile(snake_case_ , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": _A = int(max_length / len(snake_case_ ) ) _A = np.stack(np.tile(snake_case_ , snake_case_ ) ) _A = np.pad(snake_case_ , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 ) if truncation == "fusion": _A = self._np_extract_fbank_features(snake_case_ , self.mel_filters ) _A = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: _A = self._np_extract_fbank_features(snake_case_ , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , **snake_case_ , ): _A = truncation if truncation is not None else self.truncation _A = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a" F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input" F" was sampled with {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) _A = isinstance(snake_case_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F"Only mono-channel audio is supported for input to {self}" ) _A = is_batched_numpy or ( isinstance(snake_case_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _A = [np.asarray(snake_case_ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(snake_case_ , np.ndarray ): _A = np.asarray(snake_case_ , dtype=np.floataa ) elif isinstance(snake_case_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _A = raw_speech.astype(np.floataa ) # always return batch if not is_batched: _A = [np.asarray(snake_case_ )] # convert to mel spectrogram, truncate and pad if needed. 
_A = [ self._get_input_mel(snake_case_ , max_length if max_length else self.nb_max_samples , snake_case_ , snake_case_ ) for waveform in raw_speech ] _A = [] _A = [] for mel, longer in padded_inputs: input_mel.append(snake_case_ ) is_longer.append(snake_case_ ) if truncation == "fusion" and sum(snake_case_ ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer _A = np.random.randint(0 , len(snake_case_ ) ) _A = True if isinstance(input_mel[0] , snake_case_ ): _A = [np.asarray(snake_case_ , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool _A = [[longer] for longer in is_longer] _A = {'input_features': input_mel, 'is_longer': is_longer} _A = BatchFeature(snake_case_ ) if return_tensors is not None: _A = input_features.convert_to_tensors(snake_case_ ) return input_features
27
'''simple docstring''' import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) SCREAMING_SNAKE_CASE_: int =logging.getLogger() def lowerCAmelCase_ ( ) -> Dict: '''simple docstring''' UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument("-f" ) UpperCAmelCase_ = parser.parse_args() return args.f def lowerCAmelCase_ ( snake_case_ : List[Any] ) -> str: '''simple docstring''' UpperCAmelCase_ = {} UpperCAmelCase_ = os.path.join(snake_case_ , "all_results.json" ) if os.path.exists(snake_case_ ): with open(snake_case_ , "r" ) as f: UpperCAmelCase_ = json.load(snake_case_ ) else: raise ValueError(f"""can't find {path}""" ) return results def lowerCAmelCase_ ( ) -> Dict: '''simple docstring''' UpperCAmelCase_ = torch.cuda.is_available() and torch_device == "cuda" return is_using_cuda and is_apex_available() SCREAMING_SNAKE_CASE_: Any =logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class __A ( UpperCamelCase__ ): @classmethod def _lowercase (cls : Any ): # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU UpperCAmelCase_ = tempfile.mkdtemp() UpperCAmelCase_ = os.path.join(cls.tmpdir , "default_config.yml" ) write_basic_config(save_location=cls.configPath ) UpperCAmelCase_ = ["accelerate", "launch", "--config_file", cls.configPath] @classmethod def _lowercase (cls : int ): shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --checkpointing_steps epoch --with_tracking """.split() if is_cuda_and_apex_available(): testargs.append("--fp16" ) run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "glue_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking """.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertLess(result["perplexity"] , 100 ) self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "clm_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertLess(result["perplexity"] , 42 ) self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "mlm_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : Optional[Any] ): # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu UpperCAmelCase_ = 7 if get_gpu_count() > 1 else 2 UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) self.assertLess(result["train_loss"] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "ner_no_trainer" ) ) ) @unittest.skip(reason="Fix me @muellerzr" ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : int ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. 
self.assertGreaterEqual(result["eval_f1"] , 28 ) self.assertGreaterEqual(result["eval_exact"] , 28 ) self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "qa_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : str ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertGreaterEqual(result["eval_accuracy"] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(__a , "swag_no_trainer" ) ) ) @slow @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : Optional[int] ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertGreaterEqual(result["eval_rouge1"] , 10 ) self.assertGreaterEqual(result["eval_rouge2"] , 2 ) self.assertGreaterEqual(result["eval_rougeL"] , 7 ) self.assertGreaterEqual(result["eval_rougeLsum"] , 7 ) self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "summarization_no_trainer" ) ) ) @slow @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : List[str] ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertGreaterEqual(result["eval_bleu"] , 30 ) self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "translation_no_trainer" ) ) ) @slow def _lowercase (self : Dict ): UpperCAmelCase_ = logging.StreamHandler(sys.stdout ) logger.addHandler(__a ) UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 
--checkpointing_steps epoch """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : Any ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 """.split() if is_cuda_and_apex_available(): testargs.append("--fp16" ) run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) # The base model scores a 25% self.assertGreaterEqual(result["eval_accuracy"] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(__a , "step_1" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "image_classification_no_trainer" ) ) )
78
0
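The "repeatpad" branch of the CLAP feature extractor above (tile the short waveform, then zero-pad up to `max_length`) is the part most worth sanity-checking. This is a standalone numpy sketch of that strategy, not the extractor's own API:

import numpy as np

waveform = np.ones(3, dtype=np.float32)
max_length = 8
n_repeat = max_length // len(waveform)                  # 2 full repeats fit
padded = np.tile(waveform, n_repeat)                    # length 6
padded = np.pad(padded, (0, max_length - len(padded)))  # zero-fill the tail
print(padded)  # [1. 1. 1. 1. 1. 1. 0. 0.]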
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
28
import builtins
import sys

from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP


in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass


@input.register
class BulletMenu:
    """A terminal menu navigated with the arrow or number keys and confirmed with enter."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index: int, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
78
0
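The GroupViT `__init__.py` above follows the standard `_LazyModule` pattern: `_import_structure` is only a map of submodule names to exported symbols, and the real (heavy) import happens on first attribute access. A hedged usage sketch, assuming a `transformers` install that ships this model:

import transformers

config_cls = transformers.GroupViTConfig  # resolves "configuration_groupvit" lazily
print(config_cls().model_type)            # expected: "groupvit"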
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging A_ = logging.get_logger(__name__) A_ = { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""", # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class __lowerCamelCase ( lowerCAmelCase ): a__: Any = 'blenderbot-small' a__: Tuple = ['past_key_values'] a__: Union[str, Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self , UpperCAmelCase=5_0265 , UpperCAmelCase=512 , UpperCAmelCase=8 , UpperCAmelCase=2048 , UpperCAmelCase=16 , UpperCAmelCase=8 , UpperCAmelCase=2048 , UpperCAmelCase=16 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase="gelu" , UpperCAmelCase=512 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0_2 , UpperCAmelCase=1 , UpperCAmelCase=False , UpperCAmelCase=0 , UpperCAmelCase=1 , UpperCAmelCase=2 , UpperCAmelCase=2 , **UpperCAmelCase , ): lowerCamelCase_ = vocab_size lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = d_model lowerCamelCase_ = encoder_ffn_dim lowerCamelCase_ = encoder_layers lowerCamelCase_ = encoder_attention_heads lowerCamelCase_ = decoder_ffn_dim lowerCamelCase_ = decoder_layers lowerCamelCase_ = decoder_attention_heads lowerCamelCase_ = dropout lowerCamelCase_ = attention_dropout lowerCamelCase_ = activation_dropout lowerCamelCase_ = activation_function lowerCamelCase_ = init_std lowerCamelCase_ = encoder_layerdrop lowerCamelCase_ = decoder_layerdrop lowerCamelCase_ = use_cache lowerCamelCase_ = encoder_layers lowerCamelCase_ = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , is_encoder_decoder=UpperCAmelCase , decoder_start_token_id=UpperCAmelCase , forced_eos_token_id=UpperCAmelCase , **UpperCAmelCase , ) class __lowerCamelCase ( lowerCAmelCase ): @property def UpperCAmelCase__ ( self ): if self.task in ["default", "seq2seq-lm"]: lowerCamelCase_ = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: lowerCamelCase_ = {0: '''batch'''} lowerCamelCase_ = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: lowerCamelCase_ = {0: '''batch''', 1: '''decoder_sequence'''} lowerCamelCase_ = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(UpperCAmelCase , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. 
lowerCamelCase_ = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: lowerCamelCase_ , lowerCamelCase_ = self.num_layers for i in range(UpperCAmelCase ): lowerCamelCase_ = {0: '''batch''', 2: '''past_sequence + sequence'''} lowerCamelCase_ = {0: '''batch''', 2: '''past_sequence + sequence'''} else: lowerCamelCase_ = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property def UpperCAmelCase__ ( self ): if self.task in ["default", "seq2seq-lm"]: lowerCamelCase_ = super().outputs else: lowerCamelCase_ = super(UpperCAmelCase , self ).outputs if self.use_past: lowerCamelCase_ , lowerCamelCase_ = self.num_layers for i in range(UpperCAmelCase ): lowerCamelCase_ = {0: '''batch''', 2: '''past_sequence + sequence'''} lowerCamelCase_ = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = -1 , UpperCAmelCase = -1 , UpperCAmelCase = False , UpperCAmelCase = None , ): lowerCamelCase_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # Generate decoder inputs lowerCamelCase_ = seq_length if not self.use_past else 1 lowerCamelCase_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) lowerCamelCase_ = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} lowerCamelCase_ = dict(**UpperCAmelCase , **UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowerCamelCase_ , lowerCamelCase_ = common_inputs['''input_ids'''].shape lowerCamelCase_ = common_inputs['''decoder_input_ids'''].shape[1] lowerCamelCase_ , lowerCamelCase_ = self.num_attention_heads lowerCamelCase_ = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCamelCase_ = decoder_seq_length + 3 lowerCamelCase_ = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) lowerCamelCase_ = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(UpperCAmelCase , UpperCAmelCase )] , dim=1 ) lowerCamelCase_ = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered lowerCamelCase_ , lowerCamelCase_ = self.num_layers lowerCamelCase_ = min(UpperCAmelCase , UpperCAmelCase ) lowerCamelCase_ = max(UpperCAmelCase , UpperCAmelCase ) - min_num_layers lowerCamelCase_ = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(UpperCAmelCase ): common_inputs["past_key_values"].append( ( torch.zeros(UpperCAmelCase ), torch.zeros(UpperCAmelCase ), torch.zeros(UpperCAmelCase ), torch.zeros(UpperCAmelCase ), ) ) # TODO: test this. 
lowerCamelCase_ = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(UpperCAmelCase , UpperCAmelCase ): common_inputs["past_key_values"].append((torch.zeros(UpperCAmelCase ), torch.zeros(UpperCAmelCase )) ) return common_inputs def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = -1 , UpperCAmelCase = -1 , UpperCAmelCase = False , UpperCAmelCase = None , ): lowerCamelCase_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowerCamelCase_ , lowerCamelCase_ = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values lowerCamelCase_ = seqlen + 2 lowerCamelCase_ , lowerCamelCase_ = self.num_layers lowerCamelCase_ , lowerCamelCase_ = self.num_attention_heads lowerCamelCase_ = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCamelCase_ = common_inputs['''attention_mask'''].dtype lowerCamelCase_ = torch.cat( [common_inputs['''attention_mask'''], torch.ones(UpperCAmelCase , UpperCAmelCase , dtype=UpperCAmelCase )] , dim=1 ) lowerCamelCase_ = [ (torch.zeros(UpperCAmelCase ), torch.zeros(UpperCAmelCase )) for _ in range(UpperCAmelCase ) ] return common_inputs def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = -1 , UpperCAmelCase = -1 , UpperCAmelCase = False , UpperCAmelCase = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowerCamelCase_ = compute_effective_axis_dimension( UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowerCamelCase_ = tokenizer.num_special_tokens_to_add(UpperCAmelCase ) lowerCamelCase_ = compute_effective_axis_dimension( UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCAmelCase ) # Generate dummy inputs according to compute batch and sequence lowerCamelCase_ = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size lowerCamelCase_ = dict(tokenizer(UpperCAmelCase , return_tensors=UpperCAmelCase ) ) return common_inputs def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = -1 , UpperCAmelCase = -1 , UpperCAmelCase = False , UpperCAmelCase = None , ): if self.task in ["default", "seq2seq-lm"]: lowerCamelCase_ = self._generate_dummy_inputs_for_default_and_seqaseq_lm( UpperCAmelCase , batch_size=UpperCAmelCase , seq_length=UpperCAmelCase , is_pair=UpperCAmelCase , framework=UpperCAmelCase ) elif self.task == "causal-lm": lowerCamelCase_ = self._generate_dummy_inputs_for_causal_lm( UpperCAmelCase , batch_size=UpperCAmelCase , seq_length=UpperCAmelCase , is_pair=UpperCAmelCase , framework=UpperCAmelCase ) else: lowerCamelCase_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCAmelCase , batch_size=UpperCAmelCase , seq_length=UpperCAmelCase , is_pair=UpperCAmelCase , framework=UpperCAmelCase ) return common_inputs def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): if 
self.task in ["default", "seq2seq-lm"]: lowerCamelCase_ = super()._flatten_past_key_values_(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) else: lowerCamelCase_ = super(UpperCAmelCase , self )._flatten_past_key_values_( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
29
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
78
0
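The dummy-input generation in the BlenderbotSmall ONNX config above hinges on swapping dynamic axes (-1) for small fixed sizes before tracing. The helper below is an illustrative re-statement of that convention, not the library's `compute_effective_axis_dimension`:

def effective_axis_dimension(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int:
    # A dynamic axis (<= 0) becomes a small fixed size, minus room for any
    # special tokens the tokenizer will add back.
    if dimension <= 0:
        return fixed_dimension - num_token_to_add
    return dimension

print(effective_axis_dimension(-1, 2))     # dynamic batch axis -> 2 dummy samples
print(effective_axis_dimension(-1, 8, 2))  # dynamic seq axis -> 6 unk tokens + 2 special tokens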
import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() __a = logging.get_logger(__name__) __a = [ ('bert.bert', 'visual_bert'), ('bert.cls', 'cls'), ('bert.classifier', 'cls'), ('token_type_embeddings_visual', 'visual_token_type_embeddings'), ('position_embeddings_visual', 'visual_position_embeddings'), ('projection', 'visual_projection'), ] __a = [ 'nlvr2_coco_pre_trained.th', 'nlvr2_fine_tuned.th', 'nlvr2_pre_trained.th', 'vcr_coco_pre_train.th', 'vcr_fine_tune.th', 'vcr_pre_train.th', 'vqa_coco_pre_trained.th', 'vqa_fine_tuned.th', 'vqa_pre_trained.th', ] def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : int = torch.load(_lowercase , map_location='''cpu''' ) return sd def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase=rename_keys_prefix ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = OrderedDict() UpperCAmelCase_ : List[str] = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue UpperCAmelCase_ : Tuple = key for name_pair in rename_keys_prefix: UpperCAmelCase_ : Optional[int] = new_key.replace(name_pair[0] , name_pair[1] ) UpperCAmelCase_ : Union[str, Any] = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately UpperCAmelCase_ : Tuple = new_d['''cls.predictions.bias'''] return new_d @torch.no_grad() def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' assert ( checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS ), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.''' # Get Config if "pre" in checkpoint_path: UpperCAmelCase_ : Dict = '''pretraining''' if "vcr" in checkpoint_path: UpperCAmelCase_ : Optional[int] = {'''visual_embedding_dim''': 512} elif "vqa_advanced" in checkpoint_path: UpperCAmelCase_ : Any = {'''visual_embedding_dim''': 2048} elif "vqa" in checkpoint_path: UpperCAmelCase_ : Any = {'''visual_embedding_dim''': 2048} elif "nlvr" in checkpoint_path: UpperCAmelCase_ : str = {'''visual_embedding_dim''': 1024} else: raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''' ) else: if "vcr" in checkpoint_path: UpperCAmelCase_ : str = {'''visual_embedding_dim''': 512} UpperCAmelCase_ : List[Any] = '''multichoice''' elif "vqa_advanced" in checkpoint_path: UpperCAmelCase_ : Optional[int] = {'''visual_embedding_dim''': 2048} UpperCAmelCase_ : List[Any] = '''vqa_advanced''' elif "vqa" in checkpoint_path: UpperCAmelCase_ : str = {'''visual_embedding_dim''': 2048, '''num_labels''': 3129} UpperCAmelCase_ : Union[str, Any] = '''vqa''' elif "nlvr" in checkpoint_path: UpperCAmelCase_ : Union[str, Any] = { '''visual_embedding_dim''': 1024, '''num_labels''': 2, } UpperCAmelCase_ : Optional[int] = '''nlvr''' UpperCAmelCase_ : Dict = VisualBertConfig(**_lowercase ) # Load State Dict UpperCAmelCase_ : Any = load_state_dict(_lowercase ) UpperCAmelCase_ : Any = get_new_dict(_lowercase , _lowercase ) if model_type == "pretraining": UpperCAmelCase_ : Dict = VisualBertForPreTraining(_lowercase ) elif model_type == "vqa": UpperCAmelCase_ : str = VisualBertForQuestionAnswering(_lowercase ) elif model_type == "nlvr": 
UpperCAmelCase_ : Any = VisualBertForVisualReasoning(_lowercase ) elif model_type == "multichoice": UpperCAmelCase_ : Union[str, Any] = VisualBertForMultipleChoice(_lowercase ) model.load_state_dict(_lowercase ) # Save Checkpoints Path(_lowercase ).mkdir(exist_ok=_lowercase ) model.save_pretrained(_lowercase ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.') parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.') __a = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
30
'''simple docstring''' import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed SCREAMING_SNAKE_CASE_: Any ={ 'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), 'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), 'bert': (BertConfig, BertForMaskedLM, BertTokenizer), 'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def lowerCAmelCase_ ( snake_case_ : Any ) -> str: '''simple docstring''' assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' if args.student_type == "roberta": UpperCAmelCase_ = False elif args.student_type == "gpt2": UpperCAmelCase_ = False def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : List[Any] ) -> Tuple: '''simple docstring''' if args.student_type == "roberta": UpperCAmelCase_ = False def lowerCAmelCase_ ( ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ = argparse.ArgumentParser(description="Training" ) parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." ) parser.add_argument( "--dump_path" , type=snake_case_ , required=snake_case_ , help="The output directory (log, checkpoints, parameters, etc.)" ) parser.add_argument( "--data_file" , type=snake_case_ , required=snake_case_ , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , ) parser.add_argument( "--student_type" , type=snake_case_ , choices=["distilbert", "roberta", "gpt2"] , required=snake_case_ , help="The student type (DistilBERT, RoBERTa)." , ) parser.add_argument("--student_config" , type=snake_case_ , required=snake_case_ , help="Path to the student configuration." ) parser.add_argument( "--student_pretrained_weights" , default=snake_case_ , type=snake_case_ , help="Load student initialization checkpoint." ) parser.add_argument( "--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=snake_case_ , help="Teacher type (BERT, RoBERTa)." ) parser.add_argument("--teacher_name" , type=snake_case_ , required=snake_case_ , help="The teacher model." 
) parser.add_argument("--temperature" , default=2.0 , type=snake_case_ , help="Temperature for the softmax temperature." ) parser.add_argument( "--alpha_ce" , default=0.5 , type=snake_case_ , help="Linear weight for the distillation loss. Must be >=0." ) parser.add_argument( "--alpha_mlm" , default=0.0 , type=snake_case_ , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , ) parser.add_argument("--alpha_clm" , default=0.5 , type=snake_case_ , help="Linear weight for the CLM loss. Must be >=0." ) parser.add_argument("--alpha_mse" , default=0.0 , type=snake_case_ , help="Linear weight of the MSE loss. Must be >=0." ) parser.add_argument( "--alpha_cos" , default=0.0 , type=snake_case_ , help="Linear weight of the cosine embedding loss. Must be >=0." ) parser.add_argument( "--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." ) parser.add_argument( "--mlm_mask_prop" , default=0.15 , type=snake_case_ , help="Proportion of tokens for which we need to make a prediction." , ) parser.add_argument("--word_mask" , default=0.8 , type=snake_case_ , help="Proportion of tokens to mask out." ) parser.add_argument("--word_keep" , default=0.1 , type=snake_case_ , help="Proportion of tokens to keep." ) parser.add_argument("--word_rand" , default=0.1 , type=snake_case_ , help="Proportion of tokens to randomly replace." ) parser.add_argument( "--mlm_smoothing" , default=0.7 , type=snake_case_ , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , ) parser.add_argument("--token_counts" , type=snake_case_ , help="The token counts in the data_file for MLM." ) parser.add_argument( "--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , ) parser.add_argument( "--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , ) parser.add_argument( "--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , ) parser.add_argument("--n_epoch" , type=snake_case_ , default=3 , help="Number of pass on the whole dataset." ) parser.add_argument("--batch_size" , type=snake_case_ , default=5 , help="Batch size (for each process)." ) parser.add_argument( "--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , ) parser.add_argument( "--gradient_accumulation_steps" , type=snake_case_ , default=50 , help="Gradient accumulation for larger training batches." , ) parser.add_argument("--warmup_prop" , default=0.05 , type=snake_case_ , help="Linear warmup proportion." ) parser.add_argument("--weight_decay" , default=0.0 , type=snake_case_ , help="Weight decay if we apply some." ) parser.add_argument("--learning_rate" , default=5E-4 , type=snake_case_ , help="The initial learning rate for Adam." ) parser.add_argument("--adam_epsilon" , default=1E-6 , type=snake_case_ , help="Epsilon for Adam optimizer." ) parser.add_argument("--max_grad_norm" , default=5.0 , type=snake_case_ , help="Max gradient norm." ) parser.add_argument("--initializer_range" , default=0.02 , type=snake_case_ , help="Random initialization range." 
) parser.add_argument( "--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , ) parser.add_argument( "--fp16_opt_level" , type=snake_case_ , default="O1" , help=( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html" ) , ) parser.add_argument("--n_gpu" , type=snake_case_ , default=1 , help="Number of GPUs in the node." ) parser.add_argument("--local_rank" , type=snake_case_ , default=-1 , help="Distributed training - Local rank" ) parser.add_argument("--seed" , type=snake_case_ , default=56 , help="Random seed" ) parser.add_argument("--log_interval" , type=snake_case_ , default=5_00 , help="Tensorboard logging interval." ) parser.add_argument("--checkpoint_interval" , type=snake_case_ , default=40_00 , help="Checkpoint interval." ) UpperCAmelCase_ = parser.parse_args() sanity_checks(snake_case_ ) # ARGS # init_gpu_params(snake_case_ ) set_seed(snake_case_ ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite""" " itUse `--force` if you want to overwrite it" ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" ) # SAVE PARAMS # logger.info(f"""Param: {args}""" ) with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f: json.dump(vars(snake_case_ ) , snake_case_ , indent=4 ) git_log(args.dump_path ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = MODEL_CLASSES[args.student_type] UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = MODEL_CLASSES[args.teacher_type] # TOKENIZER # UpperCAmelCase_ = teacher_tokenizer_class.from_pretrained(args.teacher_name ) UpperCAmelCase_ = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): UpperCAmelCase_ = tokenizer.all_special_tokens.index(snake_case_ ) UpperCAmelCase_ = tokenizer.all_special_ids[idx] logger.info(f"""Special tokens {special_tok_ids}""" ) UpperCAmelCase_ = special_tok_ids UpperCAmelCase_ = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f"""Loading data from {args.data_file}""" ) with open(args.data_file , "rb" ) as fp: UpperCAmelCase_ = pickle.load(snake_case_ ) if args.mlm: logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" ) with open(args.token_counts , "rb" ) as fp: UpperCAmelCase_ = pickle.load(snake_case_ ) UpperCAmelCase_ = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): UpperCAmelCase_ = 0.0 # do not predict special tokens UpperCAmelCase_ = torch.from_numpy(snake_case_ ) else: UpperCAmelCase_ = None UpperCAmelCase_ = LmSeqsDataset(params=snake_case_ , data=snake_case_ ) logger.info("Data loader created." ) # STUDENT # logger.info(f"""Loading student config from {args.student_config}""" ) UpperCAmelCase_ = student_config_class.from_pretrained(args.student_config ) UpperCAmelCase_ = True if args.student_pretrained_weights is not None: logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" ) UpperCAmelCase_ = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case_ ) else: UpperCAmelCase_ = student_model_class(snake_case_ ) if args.n_gpu > 0: student.to(f"""cuda:{args.local_rank}""" ) logger.info("Student loaded." 
) # TEACHER # UpperCAmelCase_ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case_ ) if args.n_gpu > 0: teacher.to(f"""cuda:{args.local_rank}""" ) logger.info(f"""Teacher loaded from {args.teacher_name}.""" ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(snake_case_ , snake_case_ ) if args.freeze_token_type_embds: freeze_token_type_embeddings(snake_case_ , snake_case_ ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() UpperCAmelCase_ = Distiller( params=snake_case_ , dataset=snake_case_ , token_probs=snake_case_ , student=snake_case_ , teacher=snake_case_ ) distiller.train() logger.info("Let's go get some drinks." ) if __name__ == "__main__": main()
78
0
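The VisualBERT conversion script above is, at its core, a prefix-renaming pass over the checkpoint's state dict. A minimal, self-contained sketch of that pass with toy keys (not a real checkpoint):

from collections import OrderedDict

rename_pairs = [("bert.bert", "visual_bert"), ("bert.cls", "cls")]
state_dict = {"bert.bert.embeddings.weight": 0, "bert.cls.predictions.bias": 1}

new_sd = OrderedDict()
for key, value in state_dict.items():
    for old, new in rename_pairs:
        key = key.replace(old, new)
    new_sd[key] = value

print(list(new_sd))  # ['visual_bert.embeddings.weight', 'cls.predictions.bias']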
import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal lowerCamelCase__ : int = datasets.utils.logging.get_logger(__name__) lowerCamelCase__ : str = ['names', 'prefix'] lowerCamelCase__ : List[Any] = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols'] lowerCamelCase__ : Optional[int] = ['encoding_errors', 'on_bad_lines'] lowerCamelCase__ : str = ['date_format'] @dataclass class lowerCamelCase_ ( datasets.BuilderConfig ): '''simple docstring''' lowercase_ = "," lowercase_ = None lowercase_ = "infer" lowercase_ = None lowercase_ = None lowercase_ = None lowercase_ = None lowercase_ = None lowercase_ = True lowercase_ = None lowercase_ = None lowercase_ = None lowercase_ = None lowercase_ = False lowercase_ = None lowercase_ = None lowercase_ = None lowercase_ = True lowercase_ = True lowercase_ = False lowercase_ = True lowercase_ = None lowercase_ = "." lowercase_ = None lowercase_ = '"' lowercase_ = 0 lowercase_ = None lowercase_ = None lowercase_ = None lowercase_ = None lowercase_ = True lowercase_ = True lowercase_ = 0 lowercase_ = True lowercase_ = False lowercase_ = None lowercase_ = 10_000 lowercase_ = None lowercase_ = "strict" lowercase_ = "error" lowercase_ = None def lowerCAmelCase_ ( self : Any ): if self.delimiter is not None: SCREAMING_SNAKE_CASE_ = self.delimiter if self.column_names is not None: SCREAMING_SNAKE_CASE_ = self.column_names @property def lowerCAmelCase_ ( self : Dict ): SCREAMING_SNAKE_CASE_ = { 'sep': self.sep, 'header': self.header, 'names': self.names, 'index_col': self.index_col, 'usecols': self.usecols, 'prefix': self.prefix, 'mangle_dupe_cols': self.mangle_dupe_cols, 'engine': self.engine, 'converters': self.converters, 'true_values': self.true_values, 'false_values': self.false_values, 'skipinitialspace': self.skipinitialspace, 'skiprows': self.skiprows, 'nrows': self.nrows, 'na_values': self.na_values, 'keep_default_na': self.keep_default_na, 'na_filter': self.na_filter, 'verbose': self.verbose, 'skip_blank_lines': self.skip_blank_lines, 'thousands': self.thousands, 'decimal': self.decimal, 'lineterminator': self.lineterminator, 'quotechar': self.quotechar, 'quoting': self.quoting, 'escapechar': self.escapechar, 'comment': self.comment, 'encoding': self.encoding, 'dialect': self.dialect, 'error_bad_lines': self.error_bad_lines, 'warn_bad_lines': self.warn_bad_lines, 'skipfooter': self.skipfooter, 'doublequote': self.doublequote, 'memory_map': self.memory_map, 'float_precision': self.float_precision, 'chunksize': self.chunksize, 'encoding_errors': self.encoding_errors, 'on_bad_lines': self.on_bad_lines, 'date_format': self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , _lowerCAmelCase ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not 
(datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class lowerCamelCase_ ( datasets.ArrowBasedBuilder ): '''simple docstring''' lowercase_ = CsvConfig def lowerCAmelCase_ ( self : Tuple ): return datasets.DatasetInfo(features=self.config.features ) def lowerCAmelCase_ ( self : str , _lowerCAmelCase : Optional[int] ): if not self.config.data_files: raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" ) SCREAMING_SNAKE_CASE_ = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_lowerCAmelCase , (str, list, tuple) ): SCREAMING_SNAKE_CASE_ = data_files if isinstance(_lowerCAmelCase , _lowerCAmelCase ): SCREAMING_SNAKE_CASE_ = [files] SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(_lowerCAmelCase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )] SCREAMING_SNAKE_CASE_ = [] for split_name, files in data_files.items(): if isinstance(_lowerCAmelCase , _lowerCAmelCase ): SCREAMING_SNAKE_CASE_ = [files] SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(_lowerCAmelCase ) for file in files] splits.append(datasets.SplitGenerator(name=_lowerCAmelCase , gen_kwargs={'files': files} ) ) return splits def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : pa.Table ): if self.config.features is not None: SCREAMING_SNAKE_CASE_ = self.config.features.arrow_schema if all(not require_storage_cast(_lowerCAmelCase ) for feature in self.config.features.values() ): # cheaper cast SCREAMING_SNAKE_CASE_ = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=_lowerCAmelCase ) else: # more expensive cast; allows str <-> int/float or str to Audio for example SCREAMING_SNAKE_CASE_ = table_cast(_lowerCAmelCase , _lowerCAmelCase ) return pa_table def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : Optional[Any] ): SCREAMING_SNAKE_CASE_ = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str SCREAMING_SNAKE_CASE_ = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(_lowerCAmelCase ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCAmelCase ) ): SCREAMING_SNAKE_CASE_ = pd.read_csv(_lowerCAmelCase , iterator=_lowerCAmelCase , dtype=_lowerCAmelCase , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(_lowerCAmelCase ): SCREAMING_SNAKE_CASE_ = pa.Table.from_pandas(_lowerCAmelCase ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(_lowerCAmelCase ) except ValueError as e: logger.error(F"Failed to read file '{file}' with error {type(_lowerCAmelCase )}: {e}" ) raise
31
'''simple docstring''' import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class __A ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): a__ : int = AutoencoderKL a__ : Optional[Any] = """sample""" a__ : Union[str, Any] = 1e-2 @property def _lowercase (self : Optional[int] ): UpperCAmelCase_ = 4 UpperCAmelCase_ = 3 UpperCAmelCase_ = (32, 32) UpperCAmelCase_ = floats_tensor((batch_size, num_channels) + sizes ).to(__a ) return {"sample": image} @property def _lowercase (self : Any ): return (3, 32, 32) @property def _lowercase (self : Dict ): return (3, 32, 32) def _lowercase (self : int ): UpperCAmelCase_ = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } UpperCAmelCase_ = self.dummy_input return init_dict, inputs_dict def _lowercase (self : int ): pass def _lowercase (self : int ): pass @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" ) def _lowercase (self : List[Any] ): # enable deterministic behavior for gradient checkpointing UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_init_args_and_inputs_for_common() UpperCAmelCase_ = self.model_class(**__a ) model.to(__a ) assert not model.is_gradient_checkpointing and model.training UpperCAmelCase_ = model(**__a ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() UpperCAmelCase_ = torch.randn_like(__a ) UpperCAmelCase_ = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing UpperCAmelCase_ = self.model_class(**__a ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(__a ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training UpperCAmelCase_ = model_a(**__a ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() UpperCAmelCase_ = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1E-5 ) UpperCAmelCase_ = dict(model.named_parameters() ) UpperCAmelCase_ = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) ) def _lowercase (self : Any ): UpperCAmelCase_ , UpperCAmelCase_ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=__a ) self.assertIsNotNone(__a ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(__a ) UpperCAmelCase_ = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def _lowercase (self : List[str] ): UpperCAmelCase_ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" ) UpperCAmelCase_ = model.to(__a ) model.eval() if torch_device == "mps": UpperCAmelCase_ = torch.manual_seed(0 ) else: UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 ) UpperCAmelCase_ = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) UpperCAmelCase_ = image.to(__a ) with torch.no_grad(): UpperCAmelCase_ = model(__a , sample_posterior=__a , generator=__a ).sample UpperCAmelCase_ = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": UpperCAmelCase_ = torch.tensor( [ -4.0078E-01, -3.8323E-04, -1.2681E-01, -1.1462E-01, 2.0095E-01, 1.0893E-01, -8.8247E-02, -3.0361E-01, -9.8644E-03, ] ) elif torch_device == "cpu": UpperCAmelCase_ = torch.tensor( [-0.13_52, 0.08_78, 0.04_19, -0.08_18, -0.10_69, 0.06_88, -0.14_58, -0.44_46, -0.00_26] ) else: UpperCAmelCase_ = torch.tensor( [-0.24_21, 0.46_42, 0.25_07, -0.04_38, 0.06_82, 0.31_60, -0.20_18, -0.07_27, 0.24_85] ) self.assertTrue(torch_all_close(__a , __a , rtol=1E-2 ) ) @slow class __A ( unittest.TestCase ): def _lowercase (self : Dict , __a : Dict , __a : int ): return f"""gaussian_noise_s={seed}_shape={"_".join([str(__a ) for s in shape] )}.npy""" def _lowercase (self : str ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase (self : Optional[Any] , __a : Optional[Any]=0 , __a : str=(4, 3, 512, 512) , __a : List[str]=False ): UpperCAmelCase_ = torch.floataa if fpaa else torch.floataa UpperCAmelCase_ = torch.from_numpy(load_hf_numpy(self.get_file_format(__a , __a ) ) ).to(__a ).to(__a ) return image def _lowercase (self : List[Any] , __a : Union[str, Any]="CompVis/stable-diffusion-v1-4" , __a : List[Any]=False ): UpperCAmelCase_ = "fp16" if fpaa else None UpperCAmelCase_ = torch.floataa if fpaa else torch.floataa UpperCAmelCase_ = AutoencoderKL.from_pretrained( __a , subfolder="vae" , torch_dtype=__a , revision=__a , ) model.to(__a ).eval() return model def _lowercase (self : List[Any] , __a : List[Any]=0 ): if torch_device == "mps": return torch.manual_seed(__a ) return torch.Generator(device=__a ).manual_seed(__a ) @parameterized.expand( [ # fmt: off [33, [-0.16_03, 0.98_78, -0.04_95, -0.07_90, -0.27_09, 0.83_75, -0.20_60, -0.08_24], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]], [47, [-0.23_76, 0.11_68, 0.13_32, -0.48_40, -0.25_08, 
-0.07_91, -0.04_93, -0.40_89], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]], # fmt: on ] ) def _lowercase (self : List[Any] , __a : Dict , __a : Optional[int] , __a : List[str] ): UpperCAmelCase_ = self.get_sd_vae_model() UpperCAmelCase_ = self.get_sd_image(__a ) UpperCAmelCase_ = self.get_generator(__a ) with torch.no_grad(): UpperCAmelCase_ = model(__a , generator=__a , sample_posterior=__a ).sample assert sample.shape == image.shape UpperCAmelCase_ = sample[-1, -2:, -2:, :2].flatten().float().cpu() UpperCAmelCase_ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(__a , __a , atol=3E-3 ) @parameterized.expand( [ # fmt: off [33, [-0.05_13, 0.02_89, 1.37_99, 0.21_66, -0.25_73, -0.08_71, 0.51_03, -0.09_99]], [47, [-0.41_28, -0.13_20, -0.37_04, 0.19_65, -0.41_16, -0.23_32, -0.33_40, 0.22_47]], # fmt: on ] ) @require_torch_gpu def _lowercase (self : Dict , __a : Optional[int] , __a : int ): UpperCAmelCase_ = self.get_sd_vae_model(fpaa=__a ) UpperCAmelCase_ = self.get_sd_image(__a , fpaa=__a ) UpperCAmelCase_ = self.get_generator(__a ) with torch.no_grad(): UpperCAmelCase_ = model(__a , generator=__a , sample_posterior=__a ).sample assert sample.shape == image.shape UpperCAmelCase_ = sample[-1, -2:, :2, -2:].flatten().float().cpu() UpperCAmelCase_ = torch.tensor(__a ) assert torch_all_close(__a , __a , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.16_09, 0.98_66, -0.04_87, -0.07_77, -0.27_16, 0.83_68, -0.20_55, -0.08_14], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]], [47, [-0.23_77, 0.11_47, 0.13_33, -0.48_41, -0.25_06, -0.08_05, -0.04_91, -0.40_85], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]], # fmt: on ] ) def _lowercase (self : str , __a : int , __a : Union[str, Any] , __a : List[Any] ): UpperCAmelCase_ = self.get_sd_vae_model() UpperCAmelCase_ = self.get_sd_image(__a ) with torch.no_grad(): UpperCAmelCase_ = model(__a ).sample assert sample.shape == image.shape UpperCAmelCase_ = sample[-1, -2:, -2:, :2].flatten().float().cpu() UpperCAmelCase_ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(__a , __a , atol=3E-3 ) @parameterized.expand( [ # fmt: off [13, [-0.20_51, -0.18_03, -0.23_11, -0.21_14, -0.32_92, -0.35_74, -0.29_53, -0.33_23]], [37, [-0.26_32, -0.26_25, -0.21_99, -0.27_41, -0.45_39, -0.49_90, -0.37_20, -0.49_25]], # fmt: on ] ) @require_torch_gpu def _lowercase (self : int , __a : int , __a : int ): UpperCAmelCase_ = self.get_sd_vae_model() UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) ) with torch.no_grad(): UpperCAmelCase_ = model.decode(__a ).sample assert list(sample.shape ) == [3, 3, 512, 512] UpperCAmelCase_ = sample[-1, -2:, :2, -2:].flatten().cpu() UpperCAmelCase_ = torch.tensor(__a ) assert torch_all_close(__a , __a , atol=1E-3 ) @parameterized.expand( [ # fmt: off [27, [-0.03_69, 0.02_07, -0.07_76, -0.06_82, -0.17_47, -0.19_30, -0.14_65, -0.20_39]], [16, [-0.16_28, -0.21_34, -0.27_47, -0.26_42, -0.37_74, -0.44_04, -0.36_87, -0.42_77]], # fmt: on ] ) @require_torch_gpu def _lowercase (self : Union[str, Any] , __a : List[str] , __a : Optional[Any] ): UpperCAmelCase_ = self.get_sd_vae_model(fpaa=__a ) UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) , fpaa=__a ) with torch.no_grad(): UpperCAmelCase_ = model.decode(__a ).sample assert list(sample.shape ) == [3, 3, 512, 512] UpperCAmelCase_ = sample[-1, -2:, :2, 
-2:].flatten().float().cpu() UpperCAmelCase_ = torch.tensor(__a ) assert torch_all_close(__a , __a , atol=5E-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def _lowercase (self : List[str] , __a : int ): UpperCAmelCase_ = self.get_sd_vae_model(fpaa=__a ) UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) , fpaa=__a ) with torch.no_grad(): UpperCAmelCase_ = model.decode(__a ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): UpperCAmelCase_ = model.decode(__a ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(__a , __a , atol=1E-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def _lowercase (self : Union[str, Any] , __a : Dict ): UpperCAmelCase_ = self.get_sd_vae_model() UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) ) with torch.no_grad(): UpperCAmelCase_ = model.decode(__a ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): UpperCAmelCase_ = model.decode(__a ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(__a , __a , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.30_01, 0.09_18, -2.69_84, -3.97_20, -3.20_99, -5.03_53, 1.73_38, -0.20_65, 3.42_67]], [47, [-1.50_30, -4.38_71, -6.03_55, -9.11_57, -1.66_61, -2.78_53, 2.16_07, -5.08_23, 2.56_33]], # fmt: on ] ) def _lowercase (self : Tuple , __a : List[Any] , __a : List[Any] ): UpperCAmelCase_ = self.get_sd_vae_model() UpperCAmelCase_ = self.get_sd_image(__a ) UpperCAmelCase_ = self.get_generator(__a ) with torch.no_grad(): UpperCAmelCase_ = model.encode(__a ).latent_dist UpperCAmelCase_ = dist.sample(generator=__a ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] UpperCAmelCase_ = sample[0, -1, -3:, -3:].flatten().cpu() UpperCAmelCase_ = torch.tensor(__a ) UpperCAmelCase_ = 3E-3 if torch_device != "mps" else 1E-2 assert torch_all_close(__a , __a , atol=__a )
78
0
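The CSV builder above drops pd.read_csv keyword arguments that the installed pandas cannot accept, keyed off the detected pandas version. A minimal standalone sketch of that version gate follows; the two parameter lists are illustrative assumptions rather than the builder's exact tables.

# Sketch: drop read_csv kwargs unsupported by the installed pandas version.
import pandas as pd
from packaging import version

NEW_IN_2_0 = ["date_format"]                      # assumed example list
NEW_IN_1_3 = ["encoding_errors", "on_bad_lines"]  # assumed example list

def filter_read_csv_kwargs(kwargs: dict) -> dict:
    pd_version = version.parse(pd.__version__)
    kwargs = dict(kwargs)  # don't mutate the caller's dict
    if pd_version < version.parse("2.0.0"):
        for name in NEW_IN_2_0:
            kwargs.pop(name, None)
    if pd_version < version.parse("1.3.0"):
        for name in NEW_IN_1_3:
            kwargs.pop(name, None)
    return kwargs

# Usage: safe on any pandas version; unsupported keys simply disappear.
safe_kwargs = filter_read_csv_kwargs({"sep": ",", "date_format": "%Y-%m-%d"})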
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class __UpperCamelCase ( unittest.TestCase ): @slow def UpperCamelCase( self ): _UpperCAmelCase = TFCamembertModel.from_pretrained('''jplu/tf-camembert-base''' ) _UpperCAmelCase = tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" _UpperCAmelCase = model(_UpperCamelCase )['''last_hidden_state'''] _UpperCAmelCase = tf.TensorShape((1, 10, 768) ) self.assertEqual(output.shape , _UpperCamelCase ) # compare the actual values for a slice. _UpperCAmelCase = tf.convert_to_tensor( [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
32
'''simple docstring''' import logging from transformers import PretrainedConfig SCREAMING_SNAKE_CASE_: Any =logging.getLogger(__name__) SCREAMING_SNAKE_CASE_: Any ={ 'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json', } class __A ( UpperCamelCase__ ): a__ : List[Any] = """bertabs""" def __init__(self : Any , __a : int=30522 , __a : Tuple=512 , __a : Tuple=6 , __a : Dict=512 , __a : int=8 , __a : List[Any]=512 , __a : List[str]=0.2 , __a : List[Any]=6 , __a : int=768 , __a : Any=8 , __a : Dict=2048 , __a : Tuple=0.2 , **__a : Optional[int] , ): super().__init__(**__a ) UpperCAmelCase_ = vocab_size UpperCAmelCase_ = max_pos UpperCAmelCase_ = enc_layers UpperCAmelCase_ = enc_hidden_size UpperCAmelCase_ = enc_heads UpperCAmelCase_ = enc_ff_size UpperCAmelCase_ = enc_dropout UpperCAmelCase_ = dec_layers UpperCAmelCase_ = dec_hidden_size UpperCAmelCase_ = dec_heads UpperCAmelCase_ = dec_ff_size UpperCAmelCase_ = dec_dropout
78
0
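The Camembert integration test above pins only a small slice of the final hidden state against frozen reference values rather than the full (1, 10, 768) tensor. A framework-agnostic sketch of that pattern, with made-up reference numbers:

# Sketch: regression-test a tiny output slice against frozen values (illustrative data).
import numpy as np

def check_output_slice(output: np.ndarray, expected_slice: np.ndarray, atol: float = 1e-4) -> None:
    actual = output[:, :3, :3]  # only a tiny slice is pinned, not the full tensor
    if not np.allclose(actual, expected_slice, atol=atol):
        raise AssertionError(f"max abs diff {np.abs(actual - expected_slice).max():.2e} exceeds {atol}")

# Usage with a fake (batch=1, seq=10, hidden=768) activation.
out = np.zeros((1, 10, 768), dtype=np.float32)
check_output_slice(out, np.zeros((1, 3, 3), dtype=np.float32))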
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class __magic_name__ (snake_case_ ): '''simple docstring''' __lowercase : Union[str, Any] = ['image_processor', 'tokenizer'] __lowercase : Any = 'OwlViTImageProcessor' __lowercase : Optional[Any] = ('CLIPTokenizer', 'CLIPTokenizerFast') def __init__( self:Optional[int] , _a:List[Any]=None , _a:str=None , **_a:Any ): snake_case__ = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , _a , ) snake_case__ = kwargs.pop('''feature_extractor''' ) snake_case__ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(_a , _a ) def __call__( self:Dict , _a:Any=None , _a:Optional[Any]=None , _a:Tuple=None , _a:List[str]="max_length" , _a:Dict="np" , **_a:Any ): if text is None and query_images is None and images is None: raise ValueError( '''You have to specify at least one text or query image or image. All three cannot be none.''' ) if text is not None: if isinstance(_a , _a ) or (isinstance(_a , _a ) and not isinstance(text[0] , _a )): snake_case__ = [self.tokenizer(_a , padding=_a , return_tensors=_a , **_a )] elif isinstance(_a , _a ) and isinstance(text[0] , _a ): snake_case__ = [] # Maximum number of queries across batch snake_case__ = max([len(_a ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(_a ) != max_num_queries: snake_case__ = t + [''' '''] * (max_num_queries - len(_a )) snake_case__ = self.tokenizer(_a , padding=_a , return_tensors=_a , **_a ) encodings.append(_a ) else: raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' ) if return_tensors == "np": snake_case__ = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) snake_case__ = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp snake_case__ = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) snake_case__ = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch snake_case__ = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0 ) snake_case__ = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf snake_case__ = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) snake_case__ = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) else: raise ValueError('''Target return tensor type could not be returned''' ) snake_case__ = BatchEncoding() snake_case__ = input_ids snake_case__ = attention_mask if query_images is not None: snake_case__ = BatchEncoding() snake_case__ = self.image_processor( _a , return_tensors=_a , **_a ).pixel_values snake_case__ = query_pixel_values if images is not None: snake_case__ = self.image_processor(_a , return_tensors=_a , **_a ) if 
text is not None and images is not None: snake_case__ = image_features.pixel_values return encoding elif query_images is not None and images is not None: snake_case__ = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**_a ) , tensor_type=_a ) def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , *_a:Tuple , **_a:Optional[Any] ): return self.image_processor.post_process(*_a , **_a ) def SCREAMING_SNAKE_CASE__ ( self:List[Any] , *_a:int , **_a:Optional[Any] ): return self.image_processor.post_process_object_detection(*_a , **_a ) def SCREAMING_SNAKE_CASE__ ( self:Any , *_a:Any , **_a:List[Any] ): return self.image_processor.post_process_image_guided_detection(*_a , **_a ) def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , *_a:Optional[Any] , **_a:List[str] ): return self.tokenizer.batch_decode(*_a , **_a ) def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , *_a:int , **_a:str ): return self.tokenizer.decode(*_a , **_a ) @property def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , ) return self.image_processor_class @property def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , ) return self.image_processor
33
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE_: Tuple =logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> int: '''simple docstring''' UpperCAmelCase_ = "huggingface/label-files" UpperCAmelCase_ = "imagenet-1k-id2label.json" UpperCAmelCase_ = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="dataset" ) , "r" ) ) UpperCAmelCase_ = {int(snake_case_ ): v for k, v in idalabel.items()} UpperCAmelCase_ = {v: k for k, v in idalabel.items()} UpperCAmelCase_ = "std_conv" if "bit" in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" UpperCAmelCase_ = BitConfig( conv_layer=snake_case_ , num_labels=10_00 , idalabel=snake_case_ , labelaid=snake_case_ , ) return config def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' if "stem.conv" in name: UpperCAmelCase_ = name.replace("stem.conv" , "bit.embedder.convolution" ) if "blocks" in name: UpperCAmelCase_ = name.replace("blocks" , "layers" ) if "head.fc" in name: UpperCAmelCase_ = name.replace("head.fc" , "classifier.1" ) if name.startswith("norm" ): UpperCAmelCase_ = "bit." + name if "bit" not in name and "classifier" not in name: UpperCAmelCase_ = "bit.encoder." 
+ name return name def lowerCAmelCase_ ( ) -> Dict: '''simple docstring''' UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : int=False ) -> List[Any]: '''simple docstring''' UpperCAmelCase_ = get_config(snake_case_ ) # load original model from timm UpperCAmelCase_ = create_model(snake_case_ , pretrained=snake_case_ ) timm_model.eval() # load state_dict of original model UpperCAmelCase_ = timm_model.state_dict() for key in state_dict.copy().keys(): UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val.squeeze() if "head" in key else val # load HuggingFace model UpperCAmelCase_ = BitForImageClassification(snake_case_ ) model.eval() model.load_state_dict(snake_case_ ) # create image processor UpperCAmelCase_ = create_transform(**resolve_data_config({} , model=snake_case_ ) ) UpperCAmelCase_ = transform.transforms UpperCAmelCase_ = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } UpperCAmelCase_ = BitImageProcessor( do_resize=snake_case_ , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=snake_case_ , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=snake_case_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) UpperCAmelCase_ = prepare_img() UpperCAmelCase_ = transform(snake_case_ ).unsqueeze(0 ) UpperCAmelCase_ = processor(snake_case_ , return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(snake_case_ , snake_case_ ) # verify logits with torch.no_grad(): UpperCAmelCase_ = model(snake_case_ ) UpperCAmelCase_ = outputs.logits print("Logits:" , logits[0, :3] ) print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] ) UpperCAmelCase_ = timm_model(snake_case_ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(snake_case_ , outputs.logits , atol=1E-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" ) model.save_pretrained(snake_case_ ) processor.save_pretrained(snake_case_ ) if push_to_hub: print(f"""Pushing model {model_name} and processor to the hub""" ) model.push_to_hub(f"""ybelkada/{model_name}""" ) processor.push_to_hub(f"""ybelkada/{model_name}""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_: int =argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='resnetv2_50x1_bitm', type=str, help='Name of the BiT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model to the hub.', ) SCREAMING_SNAKE_CASE_: Union[str, Any] =parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
78
0
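When the OwlViT processor above receives a nested list of text queries, it pads every sample's query list with blank strings up to the longest list in the batch, so the per-sample encodings can be concatenated into one tensor. A minimal sketch of that padding step (the helper name is hypothetical):

# Sketch: pad each sample's query list to the batch-wide maximum with blank strings.
def pad_query_lists(batch: list[list[str]]) -> list[list[str]]:
    max_num_queries = max(len(queries) for queries in batch)
    return [queries + [" "] * (max_num_queries - len(queries)) for queries in batch]

# Usage: the second sample gains one blank query.
padded = pad_query_lists([["a photo of a cat", "a photo of a dog"], ["a remote"]])
assert [len(q) for q in padded] == [2, 2]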
"""simple docstring""" from __future__ import annotations from statistics import mean def __snake_case ( _lowercase ,_lowercase ,_lowercase ): """simple docstring""" UpperCamelCase = [0] * no_of_processes UpperCamelCase = [0] * no_of_processes # Initialize remaining_time to waiting_time. for i in range(_lowercase ): UpperCamelCase = burst_time[i] UpperCamelCase = [] UpperCamelCase = 0 UpperCamelCase = 0 # When processes are not completed, # A process whose arrival time has passed \ # and has remaining execution time is put into the ready_process. # The shortest process in the ready_process, target_process is executed. while completed != no_of_processes: UpperCamelCase = [] UpperCamelCase = -1 for i in range(_lowercase ): if (arrival_time[i] <= total_time) and (remaining_time[i] > 0): ready_process.append(_lowercase ) if len(_lowercase ) > 0: UpperCamelCase = ready_process[0] for i in ready_process: if remaining_time[i] < remaining_time[target_process]: UpperCamelCase = i total_time += burst_time[target_process] completed += 1 UpperCamelCase = 0 UpperCamelCase = ( total_time - arrival_time[target_process] - burst_time[target_process] ) else: total_time += 1 return waiting_time def __snake_case ( _lowercase ,_lowercase ,_lowercase ): """simple docstring""" UpperCamelCase = [0] * no_of_processes for i in range(_lowercase ): UpperCamelCase = burst_time[i] + waiting_time[i] return turn_around_time if __name__ == "__main__": print('[TEST CASE 01]') SCREAMING_SNAKE_CASE_ = 4 SCREAMING_SNAKE_CASE_ = [2, 5, 3, 7] SCREAMING_SNAKE_CASE_ = [0, 0, 0, 0] SCREAMING_SNAKE_CASE_ = calculate_waitingtime(arrival_time, burst_time, no_of_processes) SCREAMING_SNAKE_CASE_ = calculate_turnaroundtime( burst_time, no_of_processes, waiting_time ) # Printing the Result print('PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time') for i, process_id in enumerate(list(range(1, 5))): print( f'{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t' f'{waiting_time[i]}\t\t\t\t{turn_around_time[i]}' ) print(f'\nAverage waiting time = {mean(waiting_time):.5f}') print(f'Average turnaround time = {mean(turn_around_time):.5f}')
34
'''simple docstring''' import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class __A ( unittest.TestCase ): def _lowercase (self : List[str] ): UpperCAmelCase_ = 0 def _lowercase (self : Tuple ): UpperCAmelCase_ = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" ) self.assertIsInstance(__a , __a ) def _lowercase (self : str ): with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json" UpperCAmelCase_ = Path(__a ) / "config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , ) json.dump({"model_type": "clip"} , open(__a , "w" ) ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def _lowercase (self : Dict ): # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json" UpperCAmelCase_ = Path(__a ) / "config.json" json.dump( {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , ) json.dump({"model_type": "clip"} , open(__a , "w" ) ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def _lowercase (self : List[str] ): with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = CLIPConfig() # Create a dummy config file with image_proceesor_type UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json" UpperCAmelCase_ = Path(__a ) / "config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , ) json.dump({"model_type": "clip"} , open(__a , "w" ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ).to_dict() config_dict.pop("image_processor_type" ) UpperCAmelCase_ = CLIPImageProcessor(**__a ) # save in new folder model_config.save_pretrained(__a ) config.save_pretrained(__a ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ) # make sure private variable is not incorrectly saved UpperCAmelCase_ = json.loads(config.to_json_string() ) self.assertTrue("_processor_class" not in dict_as_saved ) self.assertIsInstance(__a , __a ) def _lowercase (self : int ): with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def _lowercase (self : Tuple ): with self.assertRaisesRegex( __a , "clip-base is not a local folder and is not a valid model identifier" ): UpperCAmelCase_ = AutoImageProcessor.from_pretrained("clip-base" ) def _lowercase (self : Optional[int] ): with self.assertRaisesRegex( __a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): UpperCAmelCase_ = 
AutoImageProcessor.from_pretrained(__a , revision="aaaaaa" ) def _lowercase (self : Union[str, Any] ): with self.assertRaisesRegex( __a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ): UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" ) def _lowercase (self : List[Any] ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__a ): UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" ) # If remote code is disabled, we can't load this config. with self.assertRaises(__a ): UpperCAmelCase_ = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) # Test image processor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__a ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a , trust_remote_code=__a ) self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" ) def _lowercase (self : Optional[int] ): try: AutoConfig.register("custom" , __a ) AutoImageProcessor.register(__a , __a ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__a ): AutoImageProcessor.register(__a , __a ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json" UpperCAmelCase_ = Path(__a ) / "config.json" json.dump( {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , ) json.dump({"model_type": "clip"} , open(__a , "w" ) ) UpperCAmelCase_ = CustomImageProcessor.from_pretrained(__a ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__a ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def _lowercase (self : Optional[int] ): class __A ( UpperCamelCase__ ): a__ : str = True try: AutoConfig.register("custom" , __a ) AutoImageProcessor.register(__a , __a ) # If remote code is not set, the default is to use local UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. 
UpperCAmelCase_ = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub UpperCAmelCase_ = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) self.assertTrue(not hasattr(__a , "is_local" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
78
0
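For the first test case above (bursts [2, 5, 3, 7], all arriving at time 0), non-preemptive shortest-job-first simply runs the bursts in ascending order, so each job's waiting time is the sum of the bursts scheduled before it. A quick cross-check under that all-arrive-at-zero assumption:

# Sketch: SJF waiting times when every process arrives at time 0.
def sjf_waiting_times_all_arrive_at_zero(burst_times: list[int]) -> list[int]:
    order = sorted(range(len(burst_times)), key=lambda i: burst_times[i])
    waiting = [0] * len(burst_times)
    elapsed = 0
    for i in order:
        waiting[i] = elapsed        # job i waits for everything scheduled before it
        elapsed += burst_times[i]
    return waiting

# Bursts [2, 5, 3, 7] run as 2, 3, 5, 7 -> waits 0, 5, 2, 10 (mean 4.25).
assert sjf_waiting_times_all_arrive_at_zero([2, 5, 3, 7]) == [0, 5, 2, 10]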
import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowercase ( _UpperCAmelCase , unittest.TestCase ): lowerCamelCase : Tuple = GPTaTokenizer lowerCamelCase : Dict = GPTaTokenizerFast lowerCamelCase : Any = True lowerCamelCase : Tuple = {'''add_prefix_space''': True} lowerCamelCase : Dict = False def lowercase__ ( self : str ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt SCREAMING_SNAKE_CASE__ : int = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', '''<|endoftext|>''', ] SCREAMING_SNAKE_CASE__ : List[str] = dict(zip(_lowercase , range(len(_lowercase ) ) ) ) SCREAMING_SNAKE_CASE__ : Optional[Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] SCREAMING_SNAKE_CASE__ : Tuple = {'''unk_token''': '''<unk>'''} SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) SCREAMING_SNAKE_CASE__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_lowercase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(_lowercase ) ) def lowercase__ ( self : int , **_lowercase : Any ): kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **_lowercase ) def lowercase__ ( self : Optional[int] , **_lowercase : Tuple ): kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **_lowercase ) def lowercase__ ( self : Any , _lowercase : Optional[Any] ): SCREAMING_SNAKE_CASE__ : str = '''lower newer''' SCREAMING_SNAKE_CASE__ : Optional[int] = '''lower newer''' return input_text, output_text def lowercase__ ( self : int ): SCREAMING_SNAKE_CASE__ : int = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) SCREAMING_SNAKE_CASE__ : int = '''lower newer''' SCREAMING_SNAKE_CASE__ : int = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] SCREAMING_SNAKE_CASE__ : int = tokenizer.tokenize(_lowercase , add_prefix_space=_lowercase ) self.assertListEqual(_lowercase , _lowercase ) SCREAMING_SNAKE_CASE__ : Tuple = tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE__ : Tuple = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase ) def lowercase__ ( self : int ): if not self.test_rust_tokenizer: return SCREAMING_SNAKE_CASE__ : Dict = self.get_tokenizer() SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=_lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] = '''lower newer''' # Testing tokenization SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.tokenize(_lowercase , add_prefix_space=_lowercase ) SCREAMING_SNAKE_CASE__ : Tuple = rust_tokenizer.tokenize(_lowercase ) self.assertListEqual(_lowercase , _lowercase ) # Testing conversion to ids without special tokens SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.encode(_lowercase , 
add_special_tokens=_lowercase , add_prefix_space=_lowercase ) SCREAMING_SNAKE_CASE__ : Tuple = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) self.assertListEqual(_lowercase , _lowercase ) # Testing conversion to ids with special tokens SCREAMING_SNAKE_CASE__ : Any = self.get_rust_tokenizer(add_prefix_space=_lowercase ) SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode(_lowercase , add_prefix_space=_lowercase ) SCREAMING_SNAKE_CASE__ : List[str] = rust_tokenizer.encode(_lowercase ) self.assertListEqual(_lowercase , _lowercase ) # Testing the unknown token SCREAMING_SNAKE_CASE__ : Optional[int] = tokens + [rust_tokenizer.unk_token] SCREAMING_SNAKE_CASE__ : Dict = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase ) def lowercase__ ( self : List[str] , *_lowercase : Union[str, Any] , **_lowercase : str ): # It's very difficult to mix/test pretokenization with byte-level # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def lowercase__ ( self : str , _lowercase : Optional[int]=15 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): SCREAMING_SNAKE_CASE__ : Dict = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase ) # Simple input SCREAMING_SNAKE_CASE__ : List[str] = '''This is a simple input''' SCREAMING_SNAKE_CASE__ : List[str] = ['''This is a simple input 1''', '''This is a simple input 2'''] SCREAMING_SNAKE_CASE__ : Optional[Any] = ('''This is a simple input''', '''This is a pair''') SCREAMING_SNAKE_CASE__ : Dict = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(_lowercase , tokenizer_r.encode , _lowercase , max_length=_lowercase , padding='''max_length''' ) # Simple input self.assertRaises(_lowercase , tokenizer_r.encode_plus , _lowercase , max_length=_lowercase , padding='''max_length''' ) # Simple input self.assertRaises( _lowercase , tokenizer_r.batch_encode_plus , _lowercase , max_length=_lowercase , padding='''max_length''' , ) # Pair input self.assertRaises(_lowercase , tokenizer_r.encode , _lowercase , max_length=_lowercase , padding='''max_length''' ) # Pair input self.assertRaises(_lowercase , tokenizer_r.encode_plus , _lowercase , max_length=_lowercase , padding='''max_length''' ) # Pair input self.assertRaises( _lowercase , tokenizer_r.batch_encode_plus , _lowercase , max_length=_lowercase , padding='''max_length''' , ) def lowercase__ ( self : Optional[Any] ): SCREAMING_SNAKE_CASE__ : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' ) # Simple input SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''This is a simple input''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = ['''This is a simple input looooooooong''', '''This is a simple input'''] SCREAMING_SNAKE_CASE__ : str = ('''This is a simple input''', '''This is a pair''') SCREAMING_SNAKE_CASE__ : List[Any] = [ ('''This is a simple input loooooong''', '''This is a simple input'''), ('''This is a simple pair loooooong''', '''This is a simple pair'''), ] SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.pad_token_id SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(_lowercase , padding='''max_length''' , max_length=30 , return_tensors='''np''' ) SCREAMING_SNAKE_CASE__ : int = tokenizer(_lowercase , padding=_lowercase , 
truncate=_lowercase , return_tensors='''np''' ) SCREAMING_SNAKE_CASE__ : int = tokenizer(*_lowercase , padding='''max_length''' , max_length=60 , return_tensors='''np''' ) SCREAMING_SNAKE_CASE__ : str = tokenizer(_lowercase , padding=_lowercase , truncate=_lowercase , return_tensors='''np''' ) # s # test single string max_length padding self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s['''input_ids'''] ) self.assertTrue(0 in out_s['''attention_mask'''] ) # s2 # test automatic padding self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] ) self.assertFalse(0 in out_sa['''attention_mask'''][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] ) self.assertTrue(0 in out_sa['''attention_mask'''][1] ) # p # test single pair max_length padding self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p['''input_ids'''] ) self.assertTrue(0 in out_p['''attention_mask'''] ) # p2 # test automatic padding pair self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] ) self.assertFalse(0 in out_pa['''attention_mask'''][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] ) self.assertTrue(0 in out_pa['''attention_mask'''][1] ) def lowercase__ ( self : int ): SCREAMING_SNAKE_CASE__ : Optional[Any] = '''$$$''' SCREAMING_SNAKE_CASE__ : Optional[Any] = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=_lowercase , add_bos_token=_lowercase ) SCREAMING_SNAKE_CASE__ : int = '''This is a simple input''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = ['''This is a simple input 1''', '''This is a simple input 2'''] SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.bos_token_id SCREAMING_SNAKE_CASE__ : List[str] = tokenizer(_lowercase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer(_lowercase ) self.assertEqual(out_s.input_ids[0] , _lowercase ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) SCREAMING_SNAKE_CASE__ : int = tokenizer.decode(out_s.input_ids ) SCREAMING_SNAKE_CASE__ : Dict = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , _lowercase ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def lowercase__ ( self : Optional[Any] ): pass def lowercase__ ( self : int ): # TODO: change to self.get_tokenizers() when the fast version is implemented SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.get_tokenizer(do_lower_case=_lowercase , add_bos_token=_lowercase )] for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): SCREAMING_SNAKE_CASE__ : Optional[Any] = '''Encode this.''' SCREAMING_SNAKE_CASE__ : Optional[Any] = '''This one too please.''' SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) encoded_sequence += tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode_plus( _lowercase , _lowercase , add_special_tokens=_lowercase , return_special_tokens_mask=_lowercase , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = encoded_sequence_dict['''input_ids'''] SCREAMING_SNAKE_CASE__ : List[str] = encoded_sequence_dict['''special_tokens_mask'''] self.assertEqual(len(_lowercase ) , len(_lowercase ) ) SCREAMING_SNAKE_CASE__ : Any = [ (x if not 
special_tokens_mask[i] else None) for i, x in enumerate(_lowercase ) ] SCREAMING_SNAKE_CASE__ : Tuple = [x for x in filtered_sequence if x is not None] self.assertEqual(_lowercase , _lowercase ) @require_tokenizers class lowercase ( unittest.TestCase ): def lowercase__ ( self : List[str] ): # More context: # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1 # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519 # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439 SCREAMING_SNAKE_CASE__ : Tuple = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=_lowercase ) SCREAMING_SNAKE_CASE__ : Tuple = '''A photo of a cat''' SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode( _lowercase , ) self.assertEqual(_lowercase , [2, 2_50, 13_45, 9, 10, 47_58] ) tokenizer.save_pretrained('''test_opt''' ) SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoTokenizer.from_pretrained('''./test_opt''' ) SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode( _lowercase , ) self.assertEqual(_lowercase , [2, 2_50, 13_45, 9, 10, 47_58] ) def lowercase__ ( self : List[str] ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , use_slow=_lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = '''A photo of a cat''' SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.encode( _lowercase , ) # Same as above self.assertEqual(_lowercase , [2, 2_50, 13_45, 9, 10, 47_58] ) @unittest.skip('''This test is failing because of a bug in the fast tokenizer''' ) def lowercase__ ( self : List[Any] ): SCREAMING_SNAKE_CASE__ : Dict = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=_lowercase ) SCREAMING_SNAKE_CASE__ : Tuple = '''bos''' SCREAMING_SNAKE_CASE__ : str = tokenizer.get_vocab()['''bos'''] SCREAMING_SNAKE_CASE__ : Optional[int] = '''A photo of a cat''' SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.encode( _lowercase , ) # We changed the bos token self.assertEqual(_lowercase , [3_19_57, 2_50, 13_45, 9, 10, 47_58] ) tokenizer.save_pretrained('''./tok''' ) SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained('''./tok''' ) self.assertTrue(tokenizer.is_fast ) SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.encode( _lowercase , ) self.assertEqual(_lowercase , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
35
'''simple docstring''' import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_: Tuple =False, False, False @dataclass class __A : a__ : Optional[int] = None a__ : bool = True a__ : bool = True a__ : Optional[str] = None # Automatically constructed a__ : ClassVar[str] = "dict" a__ : ClassVar[Any] = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} ) a__ : str = field(default="""Audio""" , init=UpperCamelCase__ , repr=UpperCamelCase__ ) def __call__(self : Optional[Any] ): return self.pa_type def _lowercase (self : str , __a : Union[str, bytes, dict] ): try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err if isinstance(__a , __a ): return {"bytes": None, "path": value} elif isinstance(__a , __a ): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes UpperCAmelCase_ = BytesIO() sf.write(__a , value["array"] , value["sampling_rate"] , format="wav" ) return {"bytes": buffer.getvalue(), "path": None} elif value.get("path" ) is not None and os.path.isfile(value["path"] ): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith("pcm" ): # "PCM" only has raw audio bytes if value.get("sampling_rate" ) is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" ) if value.get("bytes" ): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) UpperCAmelCase_ = np.frombuffer(value["bytes"] , dtype=np.intaa ).astype(np.floataa ) / 32767 else: UpperCAmelCase_ = np.memmap(value["path"] , dtype="h" , mode="r" ).astype(np.floataa ) / 32767 UpperCAmelCase_ = BytesIO(bytes() ) sf.write(__a , __a , value["sampling_rate"] , format="wav" ) return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get("path" )} elif value.get("bytes" ) is not None or value.get("path" ) is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get("bytes" ), "path": value.get("path" )} else: raise ValueError( f"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" ) def _lowercase (self : Dict , __a : dict , __a : Optional[Dict[str, Union[str, bool, None]]] = None ): if not self.decode: raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." ) UpperCAmelCase_ , UpperCAmelCase_ = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None) if path is None and file is None: raise ValueError(f"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" ) try: import librosa import soundfile as sf except ImportError as err: raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." 
) from err UpperCAmelCase_ = xsplitext(__a )[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, " "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, " "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " ) if file is None: UpperCAmelCase_ = token_per_repo_id or {} UpperCAmelCase_ = path.split("::" )[-1] try: UpperCAmelCase_ = string_to_dict(__a , config.HUB_DATASETS_URL )["repo_id"] UpperCAmelCase_ = token_per_repo_id[repo_id] except (ValueError, KeyError): UpperCAmelCase_ = None with xopen(__a , "rb" , use_auth_token=__a ) as f: UpperCAmelCase_ , UpperCAmelCase_ = sf.read(__a ) else: UpperCAmelCase_ , UpperCAmelCase_ = sf.read(__a ) UpperCAmelCase_ = array.T if self.mono: UpperCAmelCase_ = librosa.to_mono(__a ) if self.sampling_rate and self.sampling_rate != sampling_rate: UpperCAmelCase_ = librosa.resample(__a , orig_sr=__a , target_sr=self.sampling_rate ) UpperCAmelCase_ = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def _lowercase (self : Dict ): from .features import Value if self.decode: raise ValueError("Cannot flatten a decoded Audio feature." ) return { "bytes": Value("binary" ), "path": Value("string" ), } def _lowercase (self : Optional[Any] , __a : Union[pa.StringArray, pa.StructArray] ): if pa.types.is_string(storage.type ): UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.binary() ) UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.string() ) UpperCAmelCase_ = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ): UpperCAmelCase_ = pa.array([Audio().encode_example(__a ) if x is not None else None for x in storage.to_pylist()] ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("bytes" ) >= 0: UpperCAmelCase_ = storage.field("bytes" ) else: UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.binary() ) if storage.type.get_field_index("path" ) >= 0: UpperCAmelCase_ = storage.field("path" ) else: UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.string() ) UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() ) return array_cast(__a , self.pa_type ) def _lowercase (self : Dict , __a : pa.StructArray ): @no_op_if_value_is_null def path_to_bytes(__a : Tuple ): with xopen(__a , "rb" ) as f: UpperCAmelCase_ = f.read() return bytes_ UpperCAmelCase_ = pa.array( [ (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) UpperCAmelCase_ = pa.array( [os.path.basename(__a ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , ) UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() ) return array_cast(__a , self.pa_type )
78
0
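The Audio feature above decodes raw .pcm files by reinterpreting the bytes as 16-bit integers and scaling by 32767, the int16 positive maximum, to land in [-1, 1]. A minimal standalone sketch of that conversion (the function name is hypothetical):

# Sketch: decode raw 16-bit little-endian PCM bytes into normalized float samples.
import numpy as np

def pcm16_bytes_to_float(pcm: bytes) -> np.ndarray:
    samples = np.frombuffer(pcm, dtype=np.int16)
    return samples.astype(np.float32) / 32767

# Usage: two hand-built samples, full-scale positive and half-scale negative.
raw = np.array([32767, -16384], dtype=np.int16).tobytes()
print(pcm16_bytes_to_float(raw))  # -> [ 1.  -0.50001526]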
import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class _A ( unittest.TestCase ): '''simple docstring''' __lowerCamelCase : int = JukeboxTokenizer __lowerCamelCase : int = { '''artist''': '''Zac Brown Band''', '''genres''': '''Country''', '''lyrics''': '''I met a traveller from an antique land, Who said "Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away ''', } @require_torch def snake_case_ ( self ): '''simple docstring''' import torch snake_case : Optional[Any] = JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" ) snake_case : List[Any] = tokenizer(**self.metas )["""input_ids"""] # fmt: off snake_case : int = [ torch.tensor([[ 0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 
76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] ,EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] ,EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] ,EXPECTED_OUTPUT[2] ) ) @require_torch def snake_case_ ( self ): '''simple docstring''' import torch snake_case : str = JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" ) snake_case : Any = tokenizer(**self.metas )["""input_ids"""] # fmt: off snake_case : Tuple = [ torch.tensor([[ 0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 
77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] ,EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] ,EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] ,EXPECTED_OUTPUT[2] ) )
36
'''simple docstring''' import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Optional[int] ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg" UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ).convert("RGB" ) UpperCAmelCase_ = transforms.Compose( [ transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ), ] ) UpperCAmelCase_ = transform(snake_case_ ).unsqueeze(0 ).to(snake_case_ ) return image def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' if "visual_encoder" in key: UpperCAmelCase_ = re.sub("visual_encoder*" , "vision_model.encoder" , snake_case_ ) if "blocks" in key: UpperCAmelCase_ = re.sub(R"blocks" , "layers" , snake_case_ ) if "attn" in key: UpperCAmelCase_ = re.sub(R"attn" , "self_attn" , snake_case_ ) if "norm1" in key: UpperCAmelCase_ = re.sub(R"norm1" , "layer_norm1" , snake_case_ ) if "norm2" in key: UpperCAmelCase_ = re.sub(R"norm2" , "layer_norm2" , snake_case_ ) if "encoder.norm" in key: UpperCAmelCase_ = re.sub(R"encoder.norm" , "post_layernorm" , snake_case_ ) if "encoder.patch_embed.proj" in key: UpperCAmelCase_ = re.sub(R"encoder.patch_embed.proj" , "embeddings.patch_embedding" , snake_case_ ) if "encoder.pos_embed" in key: UpperCAmelCase_ = re.sub(R"encoder.pos_embed" , "embeddings.position_embedding" , snake_case_ ) if "encoder.cls_token" in key: UpperCAmelCase_ = re.sub(R"encoder.cls_token" , "embeddings.class_embedding" , snake_case_ ) if "self_attn" in key: UpperCAmelCase_ = re.sub(R"self_attn.proj" , "self_attn.projection" , snake_case_ ) return key @torch.no_grad() def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : Any=None ) -> Union[str, Any]: '''simple docstring''' if config_path is not None: UpperCAmelCase_ = BlipConfig.from_pretrained(snake_case_ ) else: UpperCAmelCase_ = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} ) UpperCAmelCase_ = BlipForConditionalGeneration(snake_case_ ).eval() UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth" UpperCAmelCase_ = blip_decoder(pretrained=snake_case_ , image_size=3_84 , vit="base" ) UpperCAmelCase_ = pt_model.eval() UpperCAmelCase_ = pt_model.state_dict() for key in modified_state_dict.copy(): UpperCAmelCase_ = modified_state_dict.pop(snake_case_ ) UpperCAmelCase_ = rename_key(snake_case_ ) UpperCAmelCase_ = value hf_model.load_state_dict(snake_case_ ) UpperCAmelCase_ = 3_84 UpperCAmelCase_ = load_demo_image(image_size=snake_case_ , device="cpu" ) UpperCAmelCase_ = BertTokenizer.from_pretrained("bert-base-uncased" ) UpperCAmelCase_ = tokenizer(["a picture of"] ).input_ids UpperCAmelCase_ = hf_model.generate(snake_case_ , snake_case_ ) assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 
20_14, 38_99, 1_02] UpperCAmelCase_ = hf_model.generate(snake_case_ ) assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02] if pytorch_dump_folder_path is not None: hf_model.save_pretrained(snake_case_ ) # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth' UpperCAmelCase_ = ( "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth" ) UpperCAmelCase_ = blip_vqa(pretrained=snake_case_ , image_size=snake_case_ , vit="base" ) vqa_model.eval() UpperCAmelCase_ = vqa_model.state_dict() for key in modified_state_dict.copy(): UpperCAmelCase_ = modified_state_dict.pop(snake_case_ ) UpperCAmelCase_ = rename_key(snake_case_ ) UpperCAmelCase_ = value UpperCAmelCase_ = BlipForQuestionAnswering(snake_case_ ) hf_vqa_model.load_state_dict(snake_case_ ) UpperCAmelCase_ = ["How many dogs are in this image?"] UpperCAmelCase_ = tokenizer(snake_case_ , return_tensors="pt" ).input_ids UpperCAmelCase_ = hf_vqa_model.generate(snake_case_ , snake_case_ ) print(tokenizer.decode(answer[0] ) ) assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]" if pytorch_dump_folder_path is not None: hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa" ) UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth" UpperCAmelCase_ = blip_itm(pretrained=snake_case_ , image_size=snake_case_ , vit="base" ) itm_model.eval() UpperCAmelCase_ = itm_model.state_dict() for key in modified_state_dict.copy(): UpperCAmelCase_ = modified_state_dict.pop(snake_case_ ) UpperCAmelCase_ = rename_key(snake_case_ ) UpperCAmelCase_ = value UpperCAmelCase_ = BlipForImageTextRetrieval(snake_case_ ) UpperCAmelCase_ = ["A picture of a woman with a dog sitting in a beach"] UpperCAmelCase_ = tokenizer( snake_case_ , return_tensors="pt" , padding="max_length" , truncation=snake_case_ , max_length=35 , ).input_ids hf_itm_model.load_state_dict(snake_case_ ) hf_itm_model.eval() UpperCAmelCase_ = hf_itm_model(snake_case_ , snake_case_ , use_itm_head=snake_case_ ) UpperCAmelCase_ = hf_itm_model(snake_case_ , snake_case_ , use_itm_head=snake_case_ ) assert out[0].item() == 0.2110_6874_9427_7954 assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127 if pytorch_dump_folder_path is not None: hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_: Optional[Any] =argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') SCREAMING_SNAKE_CASE_: int =parser.parse_args() convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
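The rename_key helper in the conversion script above is pure string rewriting, so it can be sanity-checked in isolation; this standalone snippet copies three of its substitution rules and traces one checkpoint key through them (the key itself is just an illustrative example):

import re

key = "visual_encoder.blocks.0.attn.qkv.weight"
key = re.sub("visual_encoder*", "vision_model.encoder", key)
key = re.sub(r"blocks", "layers", key)
key = re.sub(r"attn", "self_attn", key)
print(key)  # vision_model.encoder.layers.0.self_attn.qkv.weight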
78
0
from typing import Any, Optional, Union

from ..utils import DummyObject, requires_backends


class A__(metaclass=DummyObject):
    """simple docstring"""

    _lowercase = ["transformers", "torch", "note_seq"]

    def __init__(self: Optional[int], *lowerCamelCase__: int, **lowerCamelCase_kwargs: int):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def _UpperCamelCase(cls: str, *lowerCamelCase__: Union[str, Any], **lowerCamelCase_kwargs: Optional[int]):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def _UpperCamelCase(cls: str, *lowerCamelCase__: Optional[int], **lowerCamelCase_kwargs: Any):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
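The guard above only pays off at call time; this standalone sketch (with illustrative names, not the transformers internals) reproduces the failure mode the dummy object exists to produce when note_seq is not installed:

def requires_backends(obj, backends):
    # raise the same kind of eager ImportError the real helper produces
    raise ImportError(f"{type(obj).__name__} requires the backends: {', '.join(backends)}")

class MidiProcessorPlaceholder:
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)

try:
    MidiProcessorPlaceholder()
except ImportError as err:
    print(err)  # MidiProcessorPlaceholder requires the backends: transformers, torch, note_seq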
37
'''simple docstring''' import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Union[str, Any]=0.999 , snake_case_ : Tuple="cosine" , ) -> Optional[Any]: '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(snake_case_ : Optional[int] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(snake_case_ : Optional[Any] ): return math.exp(t * -12.0 ) else: raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) UpperCAmelCase_ = [] for i in range(snake_case_ ): UpperCAmelCase_ = i / num_diffusion_timesteps UpperCAmelCase_ = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(snake_case_ ) / alpha_bar_fn(snake_case_ ) , snake_case_ ) ) return torch.tensor(snake_case_ , dtype=torch.floataa ) class __A ( UpperCamelCase__ , UpperCamelCase__ ): a__ : Tuple = [e.name for e in KarrasDiffusionSchedulers] a__ : Optional[Any] = 2 @register_to_config def __init__(self : Union[str, Any] , __a : int = 1000 , __a : float = 0.0_00_85 , __a : float = 0.0_12 , __a : str = "linear" , __a : Optional[Union[np.ndarray, List[float]]] = None , __a : str = "epsilon" , __a : Optional[bool] = False , __a : Optional[bool] = False , __a : float = 1.0 , __a : str = "linspace" , __a : int = 0 , ): if trained_betas is not None: UpperCAmelCase_ = torch.tensor(__a , dtype=torch.floataa ) elif beta_schedule == "linear": UpperCAmelCase_ = torch.linspace(__a , __a , __a , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. UpperCAmelCase_ = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , __a , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule UpperCAmelCase_ = betas_for_alpha_bar(__a , alpha_transform_type="cosine" ) elif beta_schedule == "exp": UpperCAmelCase_ = betas_for_alpha_bar(__a , alpha_transform_type="exp" ) else: raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" ) UpperCAmelCase_ = 1.0 - self.betas UpperCAmelCase_ = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(__a , __a , __a ) UpperCAmelCase_ = use_karras_sigmas def _lowercase (self : Optional[Any] , __a : Union[str, Any] , __a : Tuple=None ): if schedule_timesteps is None: UpperCAmelCase_ = self.timesteps UpperCAmelCase_ = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: UpperCAmelCase_ = 1 if len(__a ) > 1 else 0 else: UpperCAmelCase_ = timestep.cpu().item() if torch.is_tensor(__a ) else timestep UpperCAmelCase_ = self._index_counter[timestep_int] return indices[pos].item() @property def _lowercase (self : List[Any] ): # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def _lowercase (self : Optional[Any] , __a : torch.FloatTensor , __a : Union[float, torch.FloatTensor] , ): UpperCAmelCase_ = self.index_for_timestep(__a ) UpperCAmelCase_ = self.sigmas[step_index] UpperCAmelCase_ = sample / ((sigma**2 + 1) ** 0.5) return sample def _lowercase (self : Any , __a : int , __a : Union[str, torch.device] = None , __a : Optional[int] = None , ): UpperCAmelCase_ = num_inference_steps UpperCAmelCase_ = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": UpperCAmelCase_ = np.linspace(0 , num_train_timesteps - 1 , __a , dtype=__a )[::-1].copy() elif self.config.timestep_spacing == "leading": UpperCAmelCase_ = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 UpperCAmelCase_ = (np.arange(0 , __a ) * step_ratio).round()[::-1].copy().astype(__a ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": UpperCAmelCase_ = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 UpperCAmelCase_ = (np.arange(__a , 0 , -step_ratio )).round().copy().astype(__a ) timesteps -= 1 else: raise ValueError( f"""{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" ) UpperCAmelCase_ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) UpperCAmelCase_ = np.log(__a ) UpperCAmelCase_ = np.interp(__a , np.arange(0 , len(__a ) ) , __a ) if self.config.use_karras_sigmas: UpperCAmelCase_ = self._convert_to_karras(in_sigmas=__a , num_inference_steps=self.num_inference_steps ) UpperCAmelCase_ = np.array([self._sigma_to_t(__a , __a ) for sigma in sigmas] ) UpperCAmelCase_ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) UpperCAmelCase_ = torch.from_numpy(__a ).to(device=__a ) UpperCAmelCase_ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] ) UpperCAmelCase_ = torch.from_numpy(__a ) UpperCAmelCase_ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] ) if str(__a ).startswith("mps" ): # mps does not support float64 UpperCAmelCase_ = timesteps.to(__a , dtype=torch.floataa ) else: UpperCAmelCase_ = timesteps.to(device=__a ) # empty dt and derivative UpperCAmelCase_ = None UpperCAmelCase_ = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter UpperCAmelCase_ = defaultdict(__a ) def _lowercase (self : int , __a : Optional[Any] , __a : List[str] ): # get log sigma UpperCAmelCase_ = np.log(__a ) # get distribution UpperCAmelCase_ = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range UpperCAmelCase_ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 ) UpperCAmelCase_ = low_idx + 1 UpperCAmelCase_ = log_sigmas[low_idx] UpperCAmelCase_ = log_sigmas[high_idx] # interpolate sigmas UpperCAmelCase_ = (low - log_sigma) / (low - high) UpperCAmelCase_ = np.clip(__a , 0 , 1 ) # transform interpolation to time range UpperCAmelCase_ = (1 - w) * low_idx + w * high_idx UpperCAmelCase_ = t.reshape(sigma.shape ) return t def _lowercase (self : Dict , __a : torch.FloatTensor , __a : Optional[int] ): UpperCAmelCase_ = in_sigmas[-1].item() UpperCAmelCase_ = in_sigmas[0].item() UpperCAmelCase_ = 7.0 # 7.0 is the value used in the paper UpperCAmelCase_ = np.linspace(0 , 1 , __a ) UpperCAmelCase_ = sigma_min ** (1 / rho) UpperCAmelCase_ = sigma_max ** (1 / rho) UpperCAmelCase_ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def _lowercase (self : List[str] ): return self.dt is None def _lowercase (self : List[Any] , __a : Union[torch.FloatTensor, np.ndarray] , __a : Union[float, torch.FloatTensor] , __a : Union[torch.FloatTensor, np.ndarray] , __a : bool = True , ): UpperCAmelCase_ = self.index_for_timestep(__a ) # advance index counter by 1 UpperCAmelCase_ = timestep.cpu().item() if torch.is_tensor(__a ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: UpperCAmelCase_ = self.sigmas[step_index] UpperCAmelCase_ = self.sigmas[step_index + 1] else: # 2nd order / Heun's method UpperCAmelCase_ = self.sigmas[step_index - 1] UpperCAmelCase_ = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API UpperCAmelCase_ = 0 UpperCAmelCase_ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": UpperCAmelCase_ = sigma_hat if self.state_in_first_order else sigma_next UpperCAmelCase_ = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": UpperCAmelCase_ = sigma_hat if self.state_in_first_order else sigma_next UpperCAmelCase_ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": UpperCAmelCase_ = model_output else: raise ValueError( f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" ) if self.config.clip_sample: UpperCAmelCase_ = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order UpperCAmelCase_ = (sample - pred_original_sample) / sigma_hat # 3. delta timestep UpperCAmelCase_ = sigma_next - sigma_hat # store for 2nd order step UpperCAmelCase_ = derivative UpperCAmelCase_ = dt UpperCAmelCase_ = sample else: # 2. 2nd order / Heun's method UpperCAmelCase_ = (sample - pred_original_sample) / sigma_next UpperCAmelCase_ = (self.prev_derivative + derivative) / 2 # 3. take prev timestep & sample UpperCAmelCase_ = self.dt UpperCAmelCase_ = self.sample # free dt and derivative # Note, this puts the scheduler in "first order mode" UpperCAmelCase_ = None UpperCAmelCase_ = None UpperCAmelCase_ = None UpperCAmelCase_ = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=__a ) def _lowercase (self : Any , __a : torch.FloatTensor , __a : torch.FloatTensor , __a : torch.FloatTensor , ): # Make sure sigmas and timesteps have the same device and dtype as original_samples UpperCAmelCase_ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(__a ): # mps does not support float64 UpperCAmelCase_ = self.timesteps.to(original_samples.device , dtype=torch.floataa ) UpperCAmelCase_ = timesteps.to(original_samples.device , dtype=torch.floataa ) else: UpperCAmelCase_ = self.timesteps.to(original_samples.device ) UpperCAmelCase_ = timesteps.to(original_samples.device ) UpperCAmelCase_ = [self.index_for_timestep(__a , __a ) for t in timesteps] UpperCAmelCase_ = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): UpperCAmelCase_ = sigma.unsqueeze(-1 ) UpperCAmelCase_ = original_samples + noise * sigma return noisy_samples def __len__(self : str ): return self.config.num_train_timesteps
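The cosine ("squaredcos_cap_v2") beta construction at the top of the scheduler above is easy to sanity-check in isolation; this standalone snippet mirrors it with the default max_beta = 0.999 cap and a toy step count:

import math

def alpha_bar(t):
    return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

T = 10  # toy number of diffusion timesteps
betas = [min(1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), 0.999) for i in range(T)]
print([round(b, 4) for b in betas])  # monotonically increasing, capped at 0.999 at the last step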
78
0
'''simple docstring''' import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed A_ : Any = logging.getLogger(__name__) def UpperCamelCase__ ( __magic_name__ : List[str]=2 , __magic_name__ : Tuple=3 , __magic_name__ : Tuple=16 , __magic_name__ : int = 10 , __magic_name__ : int = 2 ) -> Optional[Any]: '''simple docstring''' def get_dataset(__magic_name__ : List[str] ): snake_case__ : int = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(__magic_name__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) snake_case__ : Optional[Any] = get_dataset(__magic_name__ ) snake_case__ : Union[str, Any] = get_dataset(__magic_name__ ) snake_case__ : int = DataLoader(__magic_name__ , shuffle=__magic_name__ , batch_size=__magic_name__ , num_workers=4 ) snake_case__ : Optional[int] = DataLoader(__magic_name__ , shuffle=__magic_name__ , batch_size=__magic_name__ , num_workers=4 ) return (train_dataloader, valid_dataloader) def UpperCamelCase__ ( __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : Optional[int]=None ) -> Optional[int]: '''simple docstring''' snake_case__ : Union[str, Any] = [] for epoch in range(__magic_name__ ): # Train quickly model.train() for batch in dataloader: snake_case__ , snake_case__ : Tuple = batch snake_case__ : int = model(__magic_name__ ) snake_case__ : Union[str, Any] = torch.nn.functional.mse_loss(__magic_name__ , __magic_name__ ) accelerator.backward(__magic_name__ ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class __snake_case ( nn.Module ): '''simple docstring''' def __init__( self ): super().__init__() snake_case__ : int = nn.Parameter(torch.randn(1 ) ) snake_case__ : Tuple = nn.Parameter(torch.randn(1 ) ) def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ): return x * self.a + self.b class __snake_case ( unittest.TestCase ): '''simple docstring''' def __UpperCamelCase ( self ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) snake_case__ : Optional[int] = DummyModel() snake_case__ : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) snake_case__ , snake_case__ : Dict = dummy_dataloaders() snake_case__ : List[str] = ProjectConfiguration(total_limit=1 , project_dir=__SCREAMING_SNAKE_CASE , automatic_checkpoint_naming=__SCREAMING_SNAKE_CASE ) # Train baseline snake_case__ : Any = Accelerator(project_config=__SCREAMING_SNAKE_CASE ) snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[Any] = accelerator.prepare( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def __UpperCamelCase ( self ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) snake_case__ : Any = DummyModel() snake_case__ : Dict = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) snake_case__ , snake_case__ : Optional[Any] = dummy_dataloaders() # Train baseline snake_case__ : Union[str, Any] = Accelerator() 
snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[str] = accelerator.prepare( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Save initial snake_case__ : Dict = os.path.join(__SCREAMING_SNAKE_CASE , """initial""" ) accelerator.save_state(__SCREAMING_SNAKE_CASE ) ((snake_case__) , (snake_case__)) : List[str] = model.a.item(), model.b.item() snake_case__ : List[Any] = optimizer.state_dict() snake_case__ : Optional[Any] = train(3 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ((snake_case__) , (snake_case__)) : str = model.a.item(), model.b.item() snake_case__ : Dict = optimizer.state_dict() # Train partially set_seed(4_2 ) snake_case__ : Dict = DummyModel() snake_case__ : Any = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) snake_case__ , snake_case__ : Optional[Any] = dummy_dataloaders() snake_case__ : Optional[Any] = Accelerator() snake_case__ , snake_case__ , snake_case__ , snake_case__ : Any = accelerator.prepare( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) accelerator.load_state(__SCREAMING_SNAKE_CASE ) ((snake_case__) , (snake_case__)) : Any = model.a.item(), model.b.item() snake_case__ : List[str] = optimizer.state_dict() self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) snake_case__ : Dict = train(2 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Save everything snake_case__ : Any = os.path.join(__SCREAMING_SNAKE_CASE , """checkpoint""" ) accelerator.save_state(__SCREAMING_SNAKE_CASE ) # Load everything back in and make sure all states work accelerator.load_state(__SCREAMING_SNAKE_CASE ) test_rands += train(1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ((snake_case__) , (snake_case__)) : Union[str, Any] = model.a.item(), model.b.item() snake_case__ : str = optimizer.state_dict() self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def __UpperCamelCase ( self ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) snake_case__ : Union[str, Any] = DummyModel() snake_case__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) snake_case__ , snake_case__ : Any = dummy_dataloaders() snake_case__ : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=__SCREAMING_SNAKE_CASE ) # Train baseline snake_case__ : Optional[Any] = Accelerator(project_dir=__SCREAMING_SNAKE_CASE , project_config=__SCREAMING_SNAKE_CASE ) snake_case__ , snake_case__ , snake_case__ , snake_case__ : Optional[int] = accelerator.prepare( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Save initial accelerator.save_state() ((snake_case__) , (snake_case__)) : Union[str, Any] = model.a.item(), model.b.item() snake_case__ : List[str] = optimizer.state_dict() snake_case__ : Any = train(3 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ((snake_case__) , (snake_case__)) : Optional[int] = model.a.item(), model.b.item() 
snake_case__ : int = optimizer.state_dict() # Train partially set_seed(4_2 ) snake_case__ : Any = DummyModel() snake_case__ : Any = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) snake_case__ , snake_case__ : Union[str, Any] = dummy_dataloaders() snake_case__ : List[Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=__SCREAMING_SNAKE_CASE ) snake_case__ : List[Any] = Accelerator(project_dir=__SCREAMING_SNAKE_CASE , project_config=__SCREAMING_SNAKE_CASE ) snake_case__ , snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = accelerator.prepare( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) accelerator.load_state(os.path.join(__SCREAMING_SNAKE_CASE , """checkpoints""" , """checkpoint_0""" ) ) ((snake_case__) , (snake_case__)) : Any = model.a.item(), model.b.item() snake_case__ : List[str] = optimizer.state_dict() self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) snake_case__ : Optional[Any] = train(2 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work accelerator.load_state(os.path.join(__SCREAMING_SNAKE_CASE , """checkpoints""" , """checkpoint_1""" ) ) test_rands += train(1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ((snake_case__) , (snake_case__)) : Tuple = model.a.item(), model.b.item() snake_case__ : Union[str, Any] = optimizer.state_dict() self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def __UpperCamelCase ( self ): snake_case__ : Optional[Any] = torch.tensor([1, 2, 3] ) snake_case__ : List[Any] = torch.tensor([2, 3, 4] ) snake_case__ : Union[str, Any] = DummyModel() snake_case__ : Any = torch.optim.Adam(net.parameters() ) snake_case__ : Union[str, Any] = Accelerator() with self.assertRaises(__SCREAMING_SNAKE_CASE ) as ve: accelerator.register_for_checkpointing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) snake_case__ : Union[str, Any] = str(ve.exception ) self.assertTrue("""Item at index 0""" in message ) self.assertTrue("""Item at index 1""" in message ) self.assertFalse("""Item at index 2""" in message ) self.assertFalse("""Item at index 3""" in message ) def __UpperCamelCase ( self ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) snake_case__ : List[str] = DummyModel() snake_case__ : Tuple = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) snake_case__ : Union[str, Any] = torch.optim.lr_scheduler.StepLR(__SCREAMING_SNAKE_CASE , step_size=1 , gamma=0.99 ) snake_case__ , snake_case__ : Dict = dummy_dataloaders() snake_case__ : List[str] = ProjectConfiguration(automatic_checkpoint_naming=__SCREAMING_SNAKE_CASE ) # Train baseline snake_case__ : Optional[Any] = Accelerator(project_dir=__SCREAMING_SNAKE_CASE , project_config=__SCREAMING_SNAKE_CASE ) snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ : Tuple = accelerator.prepare( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , 
__SCREAMING_SNAKE_CASE ) # Save initial accelerator.save_state() snake_case__ : Any = scheduler.state_dict() train(3 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertNotEqual(__SCREAMING_SNAKE_CASE , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(__SCREAMING_SNAKE_CASE , """checkpoints""" , """checkpoint_0""" ) ) self.assertEqual(__SCREAMING_SNAKE_CASE , scheduler.state_dict() ) def __UpperCamelCase ( self ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) snake_case__ : str = DummyModel() snake_case__ : List[Any] = ProjectConfiguration(automatic_checkpoint_naming=__SCREAMING_SNAKE_CASE , total_limit=2 ) # Train baseline snake_case__ : Any = Accelerator(project_dir=__SCREAMING_SNAKE_CASE , project_config=__SCREAMING_SNAKE_CASE ) snake_case__ : List[Any] = accelerator.prepare(__SCREAMING_SNAKE_CASE ) # Save 3 states: for _ in range(1_1 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , """checkpoints""" , """checkpoint_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , """checkpoints""" , """checkpoint_9""" ) ) ) self.assertTrue(os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , """checkpoints""" , """checkpoint_10""" ) ) ) @require_cuda def __UpperCamelCase ( self ): snake_case__ : Tuple = ["""torchrun""", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )] execute_subprocess_async(__SCREAMING_SNAKE_CASE , env=os.environ.copy() ) if __name__ == "__main__": A_ : List[str] = "/tmp/accelerate/state_checkpointing" A_ : List[str] = DummyModel() A_ : List[Any] = torch.optim.Adam(params=model.parameters(), lr=1e-3) A_ : int = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) A_ , A_ : List[str] = dummy_dataloaders() A_ : Dict = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline A_ : Tuple = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no") if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) A_ , A_ , A_ , A_ , A_ : Optional[int] = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) A_ , A_ : Any = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the intial optimizer is loaded on the GPU for group in optimizer.param_groups: A_ : Optional[int] = group["params"][0].device break assert param_device.type == accelerator.device.type A_ : List[str] = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu") for group in optimizer.param_groups: A_ : List[Any] = group["params"][0].device break assert ( param_device.type == torch.device("cpu").type ), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device") for group in optimizer.param_groups: A_ : Optional[int] = group["params"][0].device break assert ( param_device.type == accelerator.device.type ), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got 
{param_device}" # Check error with pytest.raises(TypeError, match="Unsupported optimizer map location passed"): accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid") accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
38
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union

from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class __A(PipelineTool):
    a__: List[str] = """Salesforce/blip-image-captioning-base"""
    a__: Optional[Any] = (
        """This is a tool that generates a description of an image. It takes an input named `image` which should be the """
        """image to caption, and returns a text that contains the description in English."""
    )
    a__: str = """image_captioner"""
    a__: List[str] = AutoModelForVision2Seq
    a__: int = ["""image"""]
    a__: Optional[Any] = ["""text"""]

    def __init__(self: Any, *__a: Dict, **__kwargs: Union[str, Any]):
        requires_backends(self, ["vision"])
        super().__init__(*__a, **__kwargs)

    def _lowercase(self: Union[str, Any], __a: "Image"):
        return self.pre_processor(images=__a, return_tensors="pt")

    def _lowercase(self: List[str], __a: Dict):
        return self.model.generate(**__a)

    def _lowercase(self: int, __a: Optional[Any]):
        return self.pre_processor.batch_decode(__a, skip_special_tokens=True)[0].strip()
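For a quick end-to-end check of the same checkpoint outside the tool wrapper, the plain transformers pipeline API also works; a minimal sketch, where the sample image URL is only an illustration:

import requests
from PIL import Image
from transformers import pipeline

captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # any reachable image URL will do
image = Image.open(requests.get(url, stream=True).raw)
print(captioner(image))  # e.g. [{'generated_text': 'two cats laying on a couch'}]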
78
0
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    # take a lock so concurrent workers do not race on the one-off download
    with FileLock('''.lock''') as lock:
        nltk.download('''punkt''', quiet=True)


def __SCREAMING_SNAKE_CASE(SCREAMING_SNAKE_CASE__):
    # re.sub returns a new string, so the result has to be kept
    SCREAMING_SNAKE_CASE__ = re.sub('''<n>''', '''''', SCREAMING_SNAKE_CASE__)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(SCREAMING_SNAKE_CASE__))
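A small usage sketch of the splitter above (assumes nltk's default English punkt model is available):

text = "First sentence. Second sentence.<n>"
print(__SCREAMING_SNAKE_CASE(text))
# First sentence.
# Second sentence.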
39
'''simple docstring''' import logging import math from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from .tensor_utils import tensor_tree_map, tree_map def lowerCAmelCase_ ( snake_case_ : Union[dict, list, tuple, torch.Tensor] ) -> List[Tuple[int, ...]]: '''simple docstring''' UpperCAmelCase_ = [] if isinstance(snake_case_ , snake_case_ ): for v in tree.values(): shapes.extend(_fetch_dims(snake_case_ ) ) elif isinstance(snake_case_ , (list, tuple) ): for t in tree: shapes.extend(_fetch_dims(snake_case_ ) ) elif isinstance(snake_case_ , torch.Tensor ): shapes.append(tree.shape ) else: raise ValueError("Not supported" ) return shapes @torch.jit.ignore def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Tuple[int, ...] ) -> Tuple[int, ...]: '''simple docstring''' UpperCAmelCase_ = [] for d in reversed(snake_case_ ): idx.append(flat_idx % d ) UpperCAmelCase_ = flat_idx // d return tuple(reversed(snake_case_ ) ) @torch.jit.ignore def lowerCAmelCase_ ( snake_case_ : Sequence[int] , snake_case_ : Sequence[int] , snake_case_ : Sequence[int] , snake_case_ : Optional[Sequence[bool]] = None , snake_case_ : Optional[Sequence[bool]] = None , ) -> List[Tuple[slice, ...]]: '''simple docstring''' def reduce_edge_list(snake_case_ : List[bool] ) -> None: UpperCAmelCase_ = True for i in range(len(snake_case_ ) ): UpperCAmelCase_ = -1 * (i + 1) l[reversed_idx] &= tally UpperCAmelCase_ = l[reversed_idx] if start_edges is None: UpperCAmelCase_ = [s == 0 for s in start] reduce_edge_list(snake_case_ ) if end_edges is None: UpperCAmelCase_ = [e == (d - 1) for e, d in zip(snake_case_ , snake_case_ )] reduce_edge_list(snake_case_ ) # Base cases. Either start/end are empty and we're done, or the final, # one-dimensional tensor can be simply sliced if len(snake_case_ ) == 0: return [()] elif len(snake_case_ ) == 1: return [(slice(start[0] , end[0] + 1 ),)] UpperCAmelCase_ = [] UpperCAmelCase_ = [] # Dimensions common to start and end can be selected directly for s, e in zip(snake_case_ , snake_case_ ): if s == e: path_list.append(slice(snake_case_ , s + 1 ) ) else: break UpperCAmelCase_ = tuple(snake_case_ ) UpperCAmelCase_ = len(snake_case_ ) # start == end, and we're done if divergence_idx == len(snake_case_ ): return [path] def upper() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None UpperCAmelCase_ = start[divergence_idx] return tuple( path + (slice(snake_case_ , sdi + 1 ),) + s for s in _get_minimal_slice_set( start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) ) def lower() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None UpperCAmelCase_ = end[divergence_idx] return tuple( path + (slice(snake_case_ , edi + 1 ),) + s for s in _get_minimal_slice_set( [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) ) # If both start and end are at the edges of the subtree rooted at # divergence_idx, we can just select the whole subtree at once if start_edges[divergence_idx] and end_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) ) # If just start is at the edge, we can grab almost all 
of the subtree, # treating only the ragged bottom edge as an edge case elif start_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) ) slices.extend(lower() ) # Analogous to the previous case, but the top is ragged this time elif end_edges[divergence_idx]: slices.extend(upper() ) slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) ) # If both sides of the range are ragged, we need to handle both sides # separately. If there's contiguous meat in between them, we can index it # in one big chunk else: slices.extend(upper() ) UpperCAmelCase_ = end[divergence_idx] - start[divergence_idx] if middle_ground > 1: slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) ) slices.extend(lower() ) return slices @torch.jit.ignore def lowerCAmelCase_ ( snake_case_ : torch.Tensor , snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> torch.Tensor: '''simple docstring''' UpperCAmelCase_ = t.shape[:no_batch_dims] UpperCAmelCase_ = list(_flat_idx_to_idx(snake_case_ , snake_case_ ) ) # _get_minimal_slice_set is inclusive UpperCAmelCase_ = list(_flat_idx_to_idx(flat_end - 1 , snake_case_ ) ) # Get an ordered list of slices to perform UpperCAmelCase_ = _get_minimal_slice_set( snake_case_ , snake_case_ , snake_case_ , ) UpperCAmelCase_ = [t[s] for s in slices] return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] ) def lowerCAmelCase_ ( snake_case_ : Callable , snake_case_ : Dict[str, Any] , snake_case_ : int , snake_case_ : int , snake_case_ : bool = False , snake_case_ : Any = None , snake_case_ : bool = False , ) -> Any: '''simple docstring''' if not (len(snake_case_ ) > 0): raise ValueError("Must provide at least one input" ) UpperCAmelCase_ = [shape[:no_batch_dims] for shape in _fetch_dims(snake_case_ )] UpperCAmelCase_ = tuple([max(snake_case_ ) for s in zip(*snake_case_ )] ) def _prep_inputs(snake_case_ : torch.Tensor ) -> torch.Tensor: if not low_mem: if not sum(t.shape[:no_batch_dims] ) == no_batch_dims: UpperCAmelCase_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) UpperCAmelCase_ = t.reshape(-1 , *t.shape[no_batch_dims:] ) else: UpperCAmelCase_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) return t UpperCAmelCase_ = tensor_tree_map(_prep_inputs , snake_case_ ) UpperCAmelCase_ = None if _out is not None: UpperCAmelCase_ = tensor_tree_map(lambda snake_case_ : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out ) UpperCAmelCase_ = 1 for d in orig_batch_dims: flat_batch_dim *= d UpperCAmelCase_ = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) def _select_chunk(snake_case_ : torch.Tensor ) -> torch.Tensor: return t[i : i + chunk_size] if t.shape[0] != 1 else t UpperCAmelCase_ = 0 UpperCAmelCase_ = prepped_outputs for _ in range(snake_case_ ): # Chunk the input if not low_mem: UpperCAmelCase_ = _select_chunk else: UpperCAmelCase_ = partial( _chunk_slice , flat_start=snake_case_ , flat_end=min(snake_case_ , i + chunk_size ) , no_batch_dims=len(snake_case_ ) , ) UpperCAmelCase_ = tensor_tree_map(snake_case_ , snake_case_ ) # Run the layer on the chunk UpperCAmelCase_ = layer(**snake_case_ ) # Allocate space for the output if out is None: UpperCAmelCase_ = tensor_tree_map(lambda snake_case_ : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , snake_case_ ) # Put the chunk in its pre-allocated space if isinstance(snake_case_ , snake_case_ ): def assign(snake_case_ : dict , snake_case_ : dict ) -> None: for k, v in da.items(): if 
isinstance(snake_case_ , snake_case_ ): assign(snake_case_ , da[k] ) else: if _add_into_out: v[i : i + chunk_size] += da[k] else: UpperCAmelCase_ = da[k] assign(snake_case_ , snake_case_ ) elif isinstance(snake_case_ , snake_case_ ): for xa, xa in zip(snake_case_ , snake_case_ ): if _add_into_out: xa[i : i + chunk_size] += xa else: UpperCAmelCase_ = xa elif isinstance(snake_case_ , torch.Tensor ): if _add_into_out: out[i : i + chunk_size] += output_chunk else: UpperCAmelCase_ = output_chunk else: raise ValueError("Not supported" ) i += chunk_size UpperCAmelCase_ = tensor_tree_map(lambda snake_case_ : t.view(orig_batch_dims + t.shape[1:] ) , snake_case_ ) return out class __A : def __init__(self : Dict , __a : int = 512 , ): UpperCAmelCase_ = max_chunk_size UpperCAmelCase_ = None UpperCAmelCase_ = None def _lowercase (self : List[Any] , __a : Callable , __a : tuple , __a : int ): logging.info("Tuning chunk size..." ) if min_chunk_size >= self.max_chunk_size: return min_chunk_size UpperCAmelCase_ = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )] UpperCAmelCase_ = [c for c in candidates if c > min_chunk_size] UpperCAmelCase_ = [min_chunk_size] + candidates candidates[-1] += 4 def test_chunk_size(__a : int ) -> bool: try: with torch.no_grad(): fn(*__a , chunk_size=__a ) return True except RuntimeError: return False UpperCAmelCase_ = 0 UpperCAmelCase_ = len(__a ) - 1 while i > min_viable_chunk_size_index: UpperCAmelCase_ = test_chunk_size(candidates[i] ) if not viable: UpperCAmelCase_ = (min_viable_chunk_size_index + i) // 2 else: UpperCAmelCase_ = i UpperCAmelCase_ = (i + len(__a ) - 1) // 2 return candidates[min_viable_chunk_size_index] def _lowercase (self : int , __a : Iterable , __a : Iterable ): UpperCAmelCase_ = True for aa, aa in zip(__a , __a ): assert type(__a ) == type(__a ) if isinstance(__a , (list, tuple) ): consistent &= self._compare_arg_caches(__a , __a ) elif isinstance(__a , __a ): UpperCAmelCase_ = [v for _, v in sorted(aa.items() , key=lambda __a : x[0] )] UpperCAmelCase_ = [v for _, v in sorted(aa.items() , key=lambda __a : x[0] )] consistent &= self._compare_arg_caches(__a , __a ) else: consistent &= aa == aa return consistent def _lowercase (self : List[str] , __a : Callable , __a : tuple , __a : int , ): UpperCAmelCase_ = True UpperCAmelCase_ = tree_map(lambda __a : a.shape if isinstance(__a , torch.Tensor ) else a , __a , __a ) if self.cached_arg_data is not None: # If args have changed shape/value, we need to re-tune assert len(self.cached_arg_data ) == len(__a ) UpperCAmelCase_ = self._compare_arg_caches(self.cached_arg_data , __a ) else: # Otherwise, we can reuse the precomputed value UpperCAmelCase_ = False if not consistent: UpperCAmelCase_ = self._determine_favorable_chunk_size( __a , __a , __a , ) UpperCAmelCase_ = arg_data assert self.cached_chunk_size is not None return self.cached_chunk_size
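The chunking machinery above boils down to: flatten the batch dimensions, run the layer over fixed-size slices, and stitch the pieces back together. A self-contained toy version of that idea (an illustration, not the low-memory path with its pre-allocated outputs):

import torch

def naive_chunk_apply(fn, x, chunk_size):
    # flatten all leading dims, apply fn slice by slice, then restore the shape
    flat = x.reshape(-1, x.shape[-1])
    outs = [fn(flat[i : i + chunk_size]) for i in range(0, flat.shape[0], chunk_size)]
    return torch.cat(outs).reshape(x.shape[:-1] + outs[0].shape[-1:])

x = torch.randn(2, 3, 4)
out = naive_chunk_apply(torch.nn.Linear(4, 8), x, chunk_size=2)
print(out.shape)  # torch.Size([2, 3, 8])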
78
0
import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class lowerCAmelCase_ ( unittest.TestCase ): def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> int: self.assertEqual(len(SCREAMING_SNAKE_CASE_ ), len(SCREAMING_SNAKE_CASE_ ) ) for a, b in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): self.assertAlmostEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, delta=SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ) -> Tuple: UpperCamelCase : Any = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(SCREAMING_SNAKE_CASE_ ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step, 3 ) self.assertEqual(len(accumulator.gradients ), 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step, 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2 ) def snake_case_ ( self ) -> Optional[Any]: UpperCamelCase : List[Any] = None ops.enable_eager_execution_internal() UpperCamelCase : Any = tf.config.list_physical_devices('CPU' ) if len(SCREAMING_SNAKE_CASE_ ) == 1: tf.config.set_logical_device_configuration( physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) UpperCamelCase : Union[str, Any] = tf.config.list_logical_devices(device_type='CPU' ) UpperCamelCase : int = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): UpperCamelCase : Optional[int] = GradientAccumulator() UpperCamelCase : Tuple = tf.Variable([4.0, 3.0] ) UpperCamelCase , UpperCamelCase : int = create_optimizer(5e-5, 10, 5 ) UpperCamelCase : List[Any] = tf.Variable([0.0, 0.0], trainable=SCREAMING_SNAKE_CASE_ ) def accumulate_on_replica(SCREAMING_SNAKE_CASE_ ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients, [variable] ) ) ) @tf.function def accumulate(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): with strategy.scope(): UpperCamelCase : Tuple = strategy.experimental_local_results(SCREAMING_SNAKE_CASE_ ) local_variables[0].assign(SCREAMING_SNAKE_CASE_ ) local_variables[1].assign(SCREAMING_SNAKE_CASE_ ) strategy.run(SCREAMING_SNAKE_CASE_, args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(SCREAMING_SNAKE_CASE_ ) def _check_local_values(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Optional[Any] = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value(), SCREAMING_SNAKE_CASE_, tol=1e-2 ) self.assertListAlmostEqual(values[1].value(), SCREAMING_SNAKE_CASE_, tol=1e-2 ) accumulate([1.0, 2.0], [-1.0, 1.0] ) accumulate([3.0, -1.0], [-1.0, -1.0] ) accumulate([-2.0, 2.0], [3.0, -2.0] ) self.assertEqual(accumulator.step, 3 ) _check_local_values([2.0, 3.0], [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step, 0 ) _check_local_values([0.0, 0.0], [0.0, 0.0] )
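Stripped of the distribution-strategy plumbing, the pattern the accumulator implements is "sum gradients over k micro-batches, apply one optimizer step"; a standalone sketch in plain TensorFlow (not the transformers class itself):

import tensorflow as tf

v = tf.Variable([4.0, 3.0])
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
acc = tf.Variable(tf.zeros_like(v))

for grad in ([1.0, 2.0], [3.0, -1.0], [-2.0, 2.0]):  # three micro-batches
    acc.assign_add(tf.constant(grad))             # running sum, here [2.0, 3.0]

opt.apply_gradients([(acc.value(), v)])           # one step with the summed gradient
acc.assign(tf.zeros_like(v))                      # reset, as accumulator.reset() does above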
40
'''simple docstring''' import copy import re class __A : a__ : Optional[int] = """hp""" a__ : Optional[Any] = {} a__ : List[Any] = None @classmethod def _lowercase (cls : Optional[int] , __a : str , __a : Tuple ): UpperCAmelCase_ = prefix UpperCAmelCase_ = defaults cls.build_naming_info() @staticmethod def _lowercase (__a : List[Any] , __a : List[str] ): if len(__a ) == 0: return "" UpperCAmelCase_ = None if any(char.isdigit() for char in word ): raise Exception(f"""Parameters should not contain numbers: '{word}' contains a number""" ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(__a ) + 1 ): UpperCAmelCase_ = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: UpperCAmelCase_ = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(__a : Union[str, Any] ): UpperCAmelCase_ = "" while integer != 0: UpperCAmelCase_ = chr(ord("A" ) + integer % 10 ) + s integer //= 10 return s UpperCAmelCase_ = 0 while True: UpperCAmelCase_ = word + "#" + int_to_alphabetic(__a ) if sword in info["reverse_short_word"]: continue else: UpperCAmelCase_ = sword break UpperCAmelCase_ = short_word UpperCAmelCase_ = word return short_word @staticmethod def _lowercase (__a : List[str] , __a : Union[str, Any] ): UpperCAmelCase_ = param_name.split("_" ) UpperCAmelCase_ = [TrialShortNamer.shortname_for_word(__a , __a ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name UpperCAmelCase_ = ["", "_"] for separator in separators: UpperCAmelCase_ = separator.join(__a ) if shortname not in info["reverse_short_param"]: UpperCAmelCase_ = shortname UpperCAmelCase_ = param_name return shortname return param_name @staticmethod def _lowercase (__a : int , __a : Union[str, Any] ): UpperCAmelCase_ = TrialShortNamer.shortname_for_key(__a , __a ) UpperCAmelCase_ = short_name UpperCAmelCase_ = param_name @classmethod def _lowercase (cls : Any ): if cls.NAMING_INFO is not None: return UpperCAmelCase_ = { "short_word": {}, "reverse_short_word": {}, "short_param": {}, "reverse_short_param": {}, } UpperCAmelCase_ = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(__a , __a ) UpperCAmelCase_ = info @classmethod def _lowercase (cls : int , __a : Optional[int] ): cls.build_naming_info() assert cls.PREFIX is not None UpperCAmelCase_ = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(f"""You should provide a default value for the param name {k} with value {v}""" ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue UpperCAmelCase_ = cls.NAMING_INFO["short_param"][k] if isinstance(__a , __a ): UpperCAmelCase_ = 1 if v else 0 UpperCAmelCase_ = "" if isinstance(__a , (int, float) ) else "-" UpperCAmelCase_ = f"""{key}{sep}{v}""" name.append(__a ) return "_".join(__a ) @classmethod def _lowercase (cls : Dict , __a : Dict ): UpperCAmelCase_ = repr[len(cls.PREFIX ) + 1 :] if repr == "": UpperCAmelCase_ = [] else: UpperCAmelCase_ = repr.split("_" ) UpperCAmelCase_ = {} for value in values: if "-" in value: UpperCAmelCase_ , UpperCAmelCase_ = value.split("-" ) else: UpperCAmelCase_ = re.sub("[0-9.]" , "" , __a ) UpperCAmelCase_ = float(re.sub("[^0-9.]" , "" , __a ) ) UpperCAmelCase_ = cls.NAMING_INFO["reverse_short_param"][p_k] UpperCAmelCase_ = p_v for k in cls.DEFAULTS: if k not in parameters: UpperCAmelCase_ = cls.DEFAULTS[k] return parameters
78
0
'''simple docstring''' import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow lowerCAmelCase__ = logging.getLogger() @unittest.skip('Temporarily disable the doc tests.' ) @require_torch @require_tf @slow class lowercase_ (unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Path ,lowercase__ : Union[str, None] = None ,lowercase__ : Union[List[str], None] = None ,lowercase__ : Union[str, List[str], None] = None ,lowercase__ : bool = True ,): __lowercase = [file for file in os.listdir(lowercase__ ) if os.path.isfile(os.path.join(lowercase__ ,lowercase__ ) )] if identifier is not None: __lowercase = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(lowercase__ ,lowercase__ ): for n_ in n_identifier: __lowercase = [file for file in files if n_ not in file] else: __lowercase = [file for file in files if n_identifier not in file] __lowercase = ignore_files or [] ignore_files.append('''__init__.py''' ) __lowercase = [file for file in files if file not in ignore_files] for file in files: # Open all files print('''Testing''' ,lowercase__ ) if only_modules: __lowercase = file.split('''.''' )[0] try: __lowercase = getattr(lowercase__ ,lowercase__ ) __lowercase = doctest.DocTestSuite(lowercase__ ) __lowercase = unittest.TextTestRunner().run(lowercase__ ) self.assertIs(len(result.failures ) ,0 ) except AttributeError: logger.info(F"{module_identifier} is not a module." ) else: __lowercase = doctest.testfile(str('''..''' / directory / file ) ,optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed ,0 ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): __lowercase = Path('''src/transformers''' ) __lowercase = '''modeling''' __lowercase = [ '''modeling_ctrl.py''', '''modeling_tf_ctrl.py''', ] self.analyze_directory(lowercase__ ,identifier=lowercase__ ,ignore_files=lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): __lowercase = Path('''src/transformers''' ) __lowercase = '''tokenization''' self.analyze_directory(lowercase__ ,identifier=lowercase__ ) def SCREAMING_SNAKE_CASE ( self : int ): __lowercase = Path('''src/transformers''' ) __lowercase = '''configuration''' self.analyze_directory(lowercase__ ,identifier=lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): __lowercase = Path('''src/transformers''' ) __lowercase = ['''configuration''', '''modeling''', '''tokenization'''] self.analyze_directory(lowercase__ ,n_identifier=lowercase__ ) def SCREAMING_SNAKE_CASE ( self : str ): __lowercase = Path('''docs/source''' ) __lowercase = ['''favicon.ico'''] self.analyze_directory(lowercase__ ,ignore_files=lowercase__ ,only_modules=lowercase__ )
41
'''simple docstring'''
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        # Always pad up to the next multiple of `size`, even if the dimension is already divisible.
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
78
0
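# A self-contained sketch of the pad-to-multiple step used by the image processor
# above. The formula mirrors the processor: it always pads up to the *next* multiple
# of `size`, even when the input dimension is already divisible; the helper name is
# an illustration, not part of the original API.
import numpy as np

def pad_to_multiple(image: np.ndarray, size: int = 8) -> np.ndarray:
    old_height, old_width = image.shape[:2]
    pad_height = (old_height // size + 1) * size - old_height
    pad_width = (old_width // size + 1) * size - old_width
    # Pad only the spatial axes; any trailing channel axes are left untouched.
    pad_spec = ((0, pad_height), (0, pad_width)) + ((0, 0),) * (image.ndim - 2)
    return np.pad(image, pad_spec, mode="symmetric")

print(pad_to_multiple(np.zeros((30, 45, 3))).shape)  # (32, 48, 3)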
'''simple docstring'''
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print('\n'.join(str(row) for row in solutions))
    else:
        print('No solution exists!')
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
42
'''simple docstring''' import argparse import os.path as osp import re import torch from safetensors.torch import load_file, save_file # =================# # UNet Conversion # # =================# SCREAMING_SNAKE_CASE_: Dict =[ # (stable-diffusion, HF Diffusers) ('time_embed.0.weight', 'time_embedding.linear_1.weight'), ('time_embed.0.bias', 'time_embedding.linear_1.bias'), ('time_embed.2.weight', 'time_embedding.linear_2.weight'), ('time_embed.2.bias', 'time_embedding.linear_2.bias'), ('input_blocks.0.0.weight', 'conv_in.weight'), ('input_blocks.0.0.bias', 'conv_in.bias'), ('out.0.weight', 'conv_norm_out.weight'), ('out.0.bias', 'conv_norm_out.bias'), ('out.2.weight', 'conv_out.weight'), ('out.2.bias', 'conv_out.bias'), ] SCREAMING_SNAKE_CASE_: List[Any] =[ # (stable-diffusion, HF Diffusers) ('in_layers.0', 'norm1'), ('in_layers.2', 'conv1'), ('out_layers.0', 'norm2'), ('out_layers.3', 'conv2'), ('emb_layers.1', 'time_emb_proj'), ('skip_connection', 'conv_shortcut'), ] SCREAMING_SNAKE_CASE_: Union[str, Any] =[] # hardcoded number of downblocks and resnets/attentions... # would need smarter logic for other networks. for i in range(4): # loop over downblocks/upblocks for j in range(2): # loop over resnets/attentions for downblocks SCREAMING_SNAKE_CASE_: Any =f"down_blocks.{i}.resnets.{j}." SCREAMING_SNAKE_CASE_: Tuple =f"input_blocks.{3*i + j + 1}.0." unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix)) if i < 3: # no attention layers in down_blocks.3 SCREAMING_SNAKE_CASE_: Optional[Any] =f"down_blocks.{i}.attentions.{j}." SCREAMING_SNAKE_CASE_: List[str] =f"input_blocks.{3*i + j + 1}.1." unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix)) for j in range(3): # loop over resnets/attentions for upblocks SCREAMING_SNAKE_CASE_: Union[str, Any] =f"up_blocks.{i}.resnets.{j}." SCREAMING_SNAKE_CASE_: Any =f"output_blocks.{3*i + j}.0." unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix)) if i > 0: # no attention layers in up_blocks.0 SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.attentions.{j}." SCREAMING_SNAKE_CASE_: Optional[int] =f"output_blocks.{3*i + j}.1." unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix)) if i < 3: # no downsample in down_blocks.3 SCREAMING_SNAKE_CASE_: Union[str, Any] =f"down_blocks.{i}.downsamplers.0.conv." SCREAMING_SNAKE_CASE_: Union[str, Any] =f"input_blocks.{3*(i+1)}.0.op." unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix)) # no upsample in up_blocks.3 SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.upsamplers.0." SCREAMING_SNAKE_CASE_: List[Any] =f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}." unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix)) SCREAMING_SNAKE_CASE_: int ='mid_block.attentions.0.' SCREAMING_SNAKE_CASE_: List[Any] ='middle_block.1.' unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) for j in range(2): SCREAMING_SNAKE_CASE_: Tuple =f"mid_block.resnets.{j}." SCREAMING_SNAKE_CASE_: Tuple =f"middle_block.{2*j}." 
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix)) def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = {k: k for k in unet_state_dict.keys()} for sd_name, hf_name in unet_conversion_map: UpperCAmelCase_ = sd_name for k, v in mapping.items(): if "resnets" in k: for sd_part, hf_part in unet_conversion_map_resnet: UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ ) UpperCAmelCase_ = v for k, v in mapping.items(): for sd_part, hf_part in unet_conversion_map_layer: UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ ) UpperCAmelCase_ = v UpperCAmelCase_ = {v: unet_state_dict[k] for k, v in mapping.items()} return new_state_dict # ================# # VAE Conversion # # ================# SCREAMING_SNAKE_CASE_: int =[ # (stable-diffusion, HF Diffusers) ('nin_shortcut', 'conv_shortcut'), ('norm_out', 'conv_norm_out'), ('mid.attn_1.', 'mid_block.attentions.0.'), ] for i in range(4): # down_blocks have two resnets for j in range(2): SCREAMING_SNAKE_CASE_: Tuple =f"encoder.down_blocks.{i}.resnets.{j}." SCREAMING_SNAKE_CASE_: int =f"encoder.down.{i}.block.{j}." vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) if i < 3: SCREAMING_SNAKE_CASE_: int =f"down_blocks.{i}.downsamplers.0." SCREAMING_SNAKE_CASE_: str =f"down.{i}.downsample." vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix)) SCREAMING_SNAKE_CASE_: int =f"up_blocks.{i}.upsamplers.0." SCREAMING_SNAKE_CASE_: List[str] =f"up.{3-i}.upsample." vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) # up_blocks have three resnets # also, up blocks in hf are numbered in reverse from sd for j in range(3): SCREAMING_SNAKE_CASE_: List[str] =f"decoder.up_blocks.{i}.resnets.{j}." SCREAMING_SNAKE_CASE_: Dict =f"decoder.up.{3-i}.block.{j}." vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) # this part accounts for mid blocks in both the encoder and the decoder for i in range(2): SCREAMING_SNAKE_CASE_: Any =f"mid_block.resnets.{i}." SCREAMING_SNAKE_CASE_: Tuple =f"mid.block_{i+1}." 
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) SCREAMING_SNAKE_CASE_: int =[ # (stable-diffusion, HF Diffusers) ('norm.', 'group_norm.'), ('q.', 'query.'), ('k.', 'key.'), ('v.', 'value.'), ('proj_out.', 'proj_attn.'), ] def lowerCAmelCase_ ( snake_case_ : Tuple ) -> Tuple: '''simple docstring''' return w.reshape(*w.shape , 1 , 1 ) def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ = {k: k for k in vae_state_dict.keys()} for k, v in mapping.items(): for sd_part, hf_part in vae_conversion_map: UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ ) UpperCAmelCase_ = v for k, v in mapping.items(): if "attentions" in k: for sd_part, hf_part in vae_conversion_map_attn: UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ ) UpperCAmelCase_ = v UpperCAmelCase_ = {v: vae_state_dict[k] for k, v in mapping.items()} UpperCAmelCase_ = ["q", "k", "v", "proj_out"] for k, v in new_state_dict.items(): for weight_name in weights_to_convert: if f"""mid.attn_1.{weight_name}.weight""" in k: print(f"""Reshaping {k} for SD format""" ) UpperCAmelCase_ = reshape_weight_for_sd(snake_case_ ) return new_state_dict # =========================# # Text Encoder Conversion # # =========================# SCREAMING_SNAKE_CASE_: List[Any] =[ # (stable-diffusion, HF Diffusers) ('resblocks.', 'text_model.encoder.layers.'), ('ln_1', 'layer_norm1'), ('ln_2', 'layer_norm2'), ('.c_fc.', '.fc1.'), ('.c_proj.', '.fc2.'), ('.attn', '.self_attn'), ('ln_final.', 'transformer.text_model.final_layer_norm.'), ('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'), ('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'), ] SCREAMING_SNAKE_CASE_: Dict ={re.escape(x[1]): x[0] for x in textenc_conversion_lst} SCREAMING_SNAKE_CASE_: str =re.compile('|'.join(protected.keys())) # Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp SCREAMING_SNAKE_CASE_: List[Any] ={'q': 0, 'k': 1, 'v': 2} def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Tuple: '''simple docstring''' UpperCAmelCase_ = {} UpperCAmelCase_ = {} UpperCAmelCase_ = {} for k, v in text_enc_dict.items(): if ( k.endswith(".self_attn.q_proj.weight" ) or k.endswith(".self_attn.k_proj.weight" ) or k.endswith(".self_attn.v_proj.weight" ) ): UpperCAmelCase_ = k[: -len(".q_proj.weight" )] UpperCAmelCase_ = k[-len("q_proj.weight" )] if k_pre not in capture_qkv_weight: UpperCAmelCase_ = [None, None, None] UpperCAmelCase_ = v continue if ( k.endswith(".self_attn.q_proj.bias" ) or k.endswith(".self_attn.k_proj.bias" ) or k.endswith(".self_attn.v_proj.bias" ) ): UpperCAmelCase_ = k[: -len(".q_proj.bias" )] UpperCAmelCase_ = k[-len("q_proj.bias" )] if k_pre not in capture_qkv_bias: UpperCAmelCase_ = [None, None, None] UpperCAmelCase_ = v continue UpperCAmelCase_ = textenc_pattern.sub(lambda snake_case_ : protected[re.escape(m.group(0 ) )] , snake_case_ ) UpperCAmelCase_ = v for k_pre, tensors in capture_qkv_weight.items(): if None in tensors: raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" ) UpperCAmelCase_ = textenc_pattern.sub(lambda snake_case_ : protected[re.escape(m.group(0 ) )] , snake_case_ ) UpperCAmelCase_ = torch.cat(snake_case_ ) for k_pre, tensors in capture_qkv_bias.items(): if None in tensors: raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" ) UpperCAmelCase_ = textenc_pattern.sub(lambda snake_case_ : 
protected[re.escape(m.group(0 ) )] , snake_case_ ) UpperCAmelCase_ = torch.cat(snake_case_ ) return new_state_dict def lowerCAmelCase_ ( snake_case_ : List[Any] ) -> Union[str, Any]: '''simple docstring''' return text_enc_dict if __name__ == "__main__": SCREAMING_SNAKE_CASE_: str =argparse.ArgumentParser() parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.') parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--half', action='store_true', help='Save weights in half precision.') parser.add_argument( '--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.' ) SCREAMING_SNAKE_CASE_: Dict =parser.parse_args() assert args.model_path is not None, "Must provide a model path!" assert args.checkpoint_path is not None, "Must provide a checkpoint path!" # Path for safetensors SCREAMING_SNAKE_CASE_: Any =osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors') SCREAMING_SNAKE_CASE_: Dict =osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors') SCREAMING_SNAKE_CASE_: Union[str, Any] =osp.join(args.model_path, 'text_encoder', 'model.safetensors') # Load models from safetensors if it exists, if it doesn't pytorch if osp.exists(unet_path): SCREAMING_SNAKE_CASE_: Union[str, Any] =load_file(unet_path, device='cpu') else: SCREAMING_SNAKE_CASE_: int =osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin') SCREAMING_SNAKE_CASE_: Dict =torch.load(unet_path, map_location='cpu') if osp.exists(vae_path): SCREAMING_SNAKE_CASE_: Tuple =load_file(vae_path, device='cpu') else: SCREAMING_SNAKE_CASE_: List[Any] =osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin') SCREAMING_SNAKE_CASE_: str =torch.load(vae_path, map_location='cpu') if osp.exists(text_enc_path): SCREAMING_SNAKE_CASE_: Tuple =load_file(text_enc_path, device='cpu') else: SCREAMING_SNAKE_CASE_: List[Any] =osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin') SCREAMING_SNAKE_CASE_: Any =torch.load(text_enc_path, map_location='cpu') # Convert the UNet model SCREAMING_SNAKE_CASE_: List[Any] =convert_unet_state_dict(unet_state_dict) SCREAMING_SNAKE_CASE_: Any ={'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()} # Convert the VAE model SCREAMING_SNAKE_CASE_: List[Any] =convert_vae_state_dict(vae_state_dict) SCREAMING_SNAKE_CASE_: Dict ={'first_stage_model.' + k: v for k, v in vae_state_dict.items()} # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper SCREAMING_SNAKE_CASE_: Dict ='text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict if is_vaa_model: # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm SCREAMING_SNAKE_CASE_: Any ={'transformer.' + k: v for k, v in text_enc_dict.items()} SCREAMING_SNAKE_CASE_: str =convert_text_enc_state_dict_vaa(text_enc_dict) SCREAMING_SNAKE_CASE_: int ={'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()} else: SCREAMING_SNAKE_CASE_: str =convert_text_enc_state_dict(text_enc_dict) SCREAMING_SNAKE_CASE_: Optional[int] ={'cond_stage_model.transformer.' 
+ k: v for k, v in text_enc_dict.items()} # Put together new checkpoint SCREAMING_SNAKE_CASE_: List[str] ={**unet_state_dict, **vae_state_dict, **text_enc_dict} if args.half: SCREAMING_SNAKE_CASE_: List[str] ={k: v.half() for k, v in state_dict.items()} if args.use_safetensors: save_file(state_dict, args.checkpoint_path) else: SCREAMING_SNAKE_CASE_: str ={'state_dict': state_dict} torch.save(state_dict, args.checkpoint_path)
78
0
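# A quick usage sketch for the backtracking maze solver above; the names
# solve_maze / run_maze come from the demangled rewrite and are assumptions about
# the original identifiers. 0 marks an open cell and 1 marks a wall.
maze = [
    [0, 1, 0],
    [0, 1, 0],
    [0, 0, 0],
]
solve_maze(maze)  # prints the 0/1 path matrix from (0, 0) to (2, 2) and returns True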
import math


def perfect_square(num: int) -> bool:
    # Fast but float-based: sqrt rounding can misfire for very large inputs.
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    # Exact integer arithmetic via binary search on the candidate root.
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
43
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize


def norm_squared(vector: ndarray) -> float:
    return np.dot(vector, vector)


class SVC:
    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_constraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_constraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(observations[i], observations[j])
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n] * self.classes[n] * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
78
0
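# An exact-integer alternative to the float-based perfect-square check above:
# math.isqrt avoids sqrt rounding error for very large inputs. The helper name is
# an illustration, not part of the original file.
import math

def is_perfect_square(num: int) -> bool:
    return num >= 0 and math.isqrt(num) ** 2 == num

print([n for n in range(26) if is_perfect_square(n)])  # [0, 1, 4, 9, 16, 25]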
'''simple docstring'''
import argparse

import torch

from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
44
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}


class PerceiverConfig(PretrainedConfig):
    model_type = 'perceiver'

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention='kv',
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act='gelu',
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape


class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == 'multiple-choice':
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('inputs', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union['PreTrainedTokenizerBase', 'FeatureExtractionMixin'],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [' '.join(['a']) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs['inputs'] = inputs.pop('input_ids')
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == 'pixel_values':
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs['inputs'] = inputs.pop('pixel_values')
            return inputs
        else:
            raise ValueError(
                'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.'
            )
78
0
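# A minimal sketch of the dynamic-axes mapping built by the Perceiver OnnxConfig
# above; the standalone helper name is hypothetical, introduced only for illustration.
from collections import OrderedDict

def dynamic_axes(task: str) -> OrderedDict:
    if task == 'multiple-choice':
        axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
    else:
        axis = {0: 'batch', 1: 'sequence'}
    return OrderedDict([('inputs', axis), ('attention_mask', axis)])

print(dynamic_axes('sequence-classification'))
# OrderedDict([('inputs', {0: 'batch', 1: 'sequence'}), ('attention_mask', {0: 'batch', 1: 'sequence'})])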
from __future__ import annotations

ELECTRON_CHARGE = 1.6021e-19  # units = C


def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif conductivity < 0:
        raise ValueError("Conductivity cannot be negative")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative")
    elif mobility < 0:
        raise ValueError("mobility cannot be negative")
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
45
'''simple docstring'''
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
78
0
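# Usage sketch for the Hall-effect solver above. The name electric_conductivity comes
# from the demangled rewrite and is an assumption; exactly one argument must be 0, and
# the function returns the name and value of that missing quantity.
print(electric_conductivity(conductivity=0, electron_conc=1e20, mobility=0.04))
# -> ('conductivity', 0.64084) up to floating-point rounding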
"""simple docstring""" from __future__ import annotations from math import gcd def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase = 2 , _lowerCamelCase = 1 , _lowerCamelCase = 3 , ) -> int | None: '''simple docstring''' if num < 2: raise ValueError("The input value cannot be less than 2" ) # Because of the relationship between ``f(f(x))`` and ``f(x)``, this # algorithm struggles to find factors that are divisible by two. # As a workaround, we specifically check for two and even inputs. # See: https://math.stackexchange.com/a/2856214/165820 if num > 2 and num % 2 == 0: return 2 # Pollard's Rho algorithm requires a function that returns pseudorandom # values between 0 <= X < ``num``. It doesn't need to be random in the # sense that the output value is cryptographically secure or difficult # to calculate, it only needs to be random in the sense that all output # values should be equally likely to appear. # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num`` # However, the success of Pollard's algorithm isn't guaranteed and is # determined in part by the initial seed and the chosen random function. # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num`` # where ``C`` is a value that we can modify between each attempt. def rand_fn(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: return (pow(_lowerCamelCase , 2 ) + step) % modulus for _ in range(_lowerCamelCase ): # These track the position within the cycle detection logic. _lowerCamelCase : Union[str, Any] = seed _lowerCamelCase : Optional[Any] = seed while True: # At each iteration, the tortoise moves one step and the hare moves two. _lowerCamelCase : List[str] = rand_fn(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) _lowerCamelCase : str = rand_fn(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) _lowerCamelCase : Tuple = rand_fn(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # At some point both the tortoise and the hare will enter a cycle whose # length ``p`` is a divisor of ``num``. Once in that cycle, at some point # the tortoise and hare will end up on the same value modulo ``p``. # We can detect when this happens because the position difference between # the tortoise and the hare will share a common divisor with ``num``. _lowerCamelCase : Optional[int] = gcd(hare - tortoise , _lowerCamelCase ) if divisor == 1: # No common divisor yet, just keep searching. continue else: # We found a common divisor! if divisor == num: # Unfortunately, the divisor is ``num`` itself and is useless. break else: # The divisor is a nontrivial factor of ``num``! return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare # in addition to a new step value for the random function. # To keep this example implementation deterministic, the # new values will be generated based on currently available # values instead of using something like ``random.randint``. # We can use the hare's position as the new seed. # This is actually what Richard Brent's the "optimized" variant does. _lowerCamelCase : Any = hare # The new step value for the random function can just be incremented. # At first the results will be similar to what the old function would # have produced, but the value will quickly diverge after a bit. step += 1 # We haven't found a divisor within the requested number of attempts. # We were unlucky or ``num`` itself is actually prime. 
return None if __name__ == "__main__": import argparse _lowerCAmelCase : Dict = argparse.ArgumentParser() parser.add_argument( '''num''', type=int, help='''The value to find a divisor of''', ) parser.add_argument( '''--attempts''', type=int, default=3, help='''The number of attempts before giving up''', ) _lowerCAmelCase : str = parser.parse_args() _lowerCAmelCase : Optional[int] = pollard_rho(args.num, attempts=args.attempts) if divisor is None: print(f'''{args.num} is probably prime''') else: _lowerCAmelCase : List[str] = args.num // divisor print(f'''{args.num} = {divisor} * {quotient}''')
46
'''simple docstring''' from typing import Callable, List, Optional, Union import PIL import torch from transformers import ( CLIPImageProcessor, CLIPSegForImageSegmentation, CLIPSegProcessor, CLIPTextModel, CLIPTokenizer, ) from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import deprecate, is_accelerate_available, logging SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__) # pylint: disable=invalid-name class __A ( UpperCamelCase__ ): def __init__(self : Any , __a : CLIPSegForImageSegmentation , __a : CLIPSegProcessor , __a : AutoencoderKL , __a : CLIPTextModel , __a : CLIPTokenizer , __a : UNetaDConditionModel , __a : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __a : StableDiffusionSafetyChecker , __a : CLIPImageProcessor , ): super().__init__() if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1: UpperCAmelCase_ = ( f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`""" f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """ "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1" , "1.0.0" , __a , standard_warn=__a ) UpperCAmelCase_ = dict(scheduler.config ) UpperCAmelCase_ = 1 UpperCAmelCase_ = FrozenDict(__a ) if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False: UpperCAmelCase_ = ( f"""The configuration file of this scheduler: {scheduler} has not set the configuration""" " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make" " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to" " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face" " Hub, it would be very nice if you could open a Pull request for the" " `scheduler/scheduler_config.json` file" ) deprecate("skip_prk_steps not set" , "1.0.0" , __a , standard_warn=__a ) UpperCAmelCase_ = dict(scheduler.config ) UpperCAmelCase_ = True UpperCAmelCase_ = FrozenDict(__a ) if safety_checker is None: logger.warning( f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure""" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
) self.register_modules( segmentation_model=__a , segmentation_processor=__a , vae=__a , text_encoder=__a , tokenizer=__a , unet=__a , scheduler=__a , safety_checker=__a , feature_extractor=__a , ) def _lowercase (self : str , __a : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCAmelCase_ = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__a ) def _lowercase (self : int ): self.enable_attention_slicing(__a ) def _lowercase (self : Optional[Any] ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) UpperCAmelCase_ = torch.device("cuda" ) for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]: if cpu_offloaded_model is not None: cpu_offload(__a , __a ) @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def _lowercase (self : Optional[int] ): if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(__a , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() def __call__(self : Dict , __a : Union[str, List[str]] , __a : Union[torch.FloatTensor, PIL.Image.Image] , __a : str , __a : int = 512 , __a : int = 512 , __a : int = 50 , __a : float = 7.5 , __a : Optional[Union[str, List[str]]] = None , __a : Optional[int] = 1 , __a : float = 0.0 , __a : Optional[torch.Generator] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[str] = "pil" , __a : bool = True , __a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __a : int = 1 , **__a : int , ): UpperCAmelCase_ = self.segmentation_processor( text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device ) UpperCAmelCase_ = self.segmentation_model(**__a ) UpperCAmelCase_ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy() UpperCAmelCase_ = self.numpy_to_pil(__a )[0].resize(image.size ) # Run inpainting pipeline with the generated mask UpperCAmelCase_ = StableDiffusionInpaintPipeline( vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , ) return inpainting_pipeline( prompt=__a , image=__a , mask_image=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , )
78
0
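# Quick checks for the Pollard's rho implementation above (demangled name pollard_rho).
# Which of the two prime factors is returned depends on the seed/step schedule.
print(pollard_rho(8051))   # a nontrivial factor of 8051 = 83 * 97
print(pollard_rho(10403))  # a nontrivial factor of 10403 = 101 * 103
print(pollard_rho(97))     # None -- 97 is prime, so every attempt ends with gcd == num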
import argparse
from collections import OrderedDict
from pathlib import Path

import torch

from transformers import (
    VisualBertConfig,
    VisualBertForMultipleChoice,
    VisualBertForPreTraining,
    VisualBertForQuestionAnswering,
    VisualBertForVisualReasoning,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
    ('bert.bert', 'visual_bert'),
    ('bert.cls', 'cls'),
    ('bert.classifier', 'cls'),
    ('token_type_embeddings_visual', 'visual_token_type_embeddings'),
    ('position_embeddings_visual', 'visual_position_embeddings'),
    ('projection', 'visual_projection'),
]

ACCEPTABLE_CHECKPOINTS = [
    'nlvr2_coco_pre_trained.th',
    'nlvr2_fine_tuned.th',
    'nlvr2_pre_trained.th',
    'vcr_coco_pre_train.th',
    'vcr_fine_tune.th',
    'vcr_pre_train.th',
    'vqa_coco_pre_trained.th',
    'vqa_fine_tuned.th',
    'vqa_pre_trained.th',
]


def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location='cpu')
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d['visual_bert.embeddings.position_ids'] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if 'detector' in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == 'bert.cls.predictions.decoder.weight':
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d['cls.predictions.decoder.bias'] = new_d['cls.predictions.bias']
    return new_d


@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split('/')[-1] in ACCEPTABLE_CHECKPOINTS
    ), f'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'

    # Get Config
    if 'pre' in checkpoint_path:
        model_type = 'pretraining'
        if 'vcr' in checkpoint_path:
            config_params = {'visual_embedding_dim': 512}
        elif 'vqa_advanced' in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048}
        elif 'vqa' in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048}
        elif 'nlvr' in checkpoint_path:
            config_params = {'visual_embedding_dim': 1024}
        else:
            raise NotImplementedError(f'No implementation found for `{checkpoint_path}`.')
    else:
        if 'vcr' in checkpoint_path:
            config_params = {'visual_embedding_dim': 512}
            model_type = 'multichoice'
        elif 'vqa_advanced' in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048}
            model_type = 'vqa_advanced'
        elif 'vqa' in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048, 'num_labels': 3129}
            model_type = 'vqa'
        elif 'nlvr' in checkpoint_path:
            config_params = {
                'visual_embedding_dim': 1024,
                'num_labels': 2,
            }
            model_type = 'nlvr'

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == 'pretraining':
        model = VisualBertForPreTraining(config)
    elif model_type == 'vqa':
        model = VisualBertForQuestionAnswering(config)
    elif model_type == 'nlvr':
        model = VisualBertForVisualReasoning(config)
    elif model_type == 'multichoice':
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
47
'''simple docstring'''
def is_power_of_two(number: int) -> bool:
    # n & (n - 1) clears the lowest set bit, so a zero result means at most one bit was set.
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
78
0
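# The bit identity behind the power-of-two check above (demangled name
# is_power_of_two): n & (n - 1) is 0 exactly when at most one bit is set.
print([n for n in range(1, 17) if is_power_of_two(n)])  # [1, 2, 4, 8, 16]
print(is_power_of_two(0))  # True -- 0 & -1 == 0, a known edge case of the trick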
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( UniSpeechConfig, UniSpeechForCTC, UniSpeechForPreTraining, WavaVecaFeatureExtractor, WavaVecaPhonemeCTCTokenizer, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCAmelCase__ : str = logging.get_logger(__name__) UpperCAmelCase__ : Optional[int] = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "ctc_proj", "mask_emb": "masked_spec_embed", } UpperCAmelCase__ : Tuple = [ "ctc_proj", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def A ( UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict ) -> Dict: '''simple docstring''' for attribute in key.split("." ): if is_finetuned: if attribute in ["quantizer", "project_q", "project_hid"]: # those layers are only relevant for pretraining and should be dropped return if attribute == "ctc_proj": # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models lowerCAmelCase__ = "lm_head" lowerCAmelCase__ = getattr(UpperCamelCase_ , UpperCamelCase_ ) if weight_type is not None: lowerCAmelCase__ = getattr(UpperCamelCase_ , UpperCamelCase_ ).shape else: lowerCAmelCase__ = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": lowerCAmelCase__ = value elif weight_type == "weight_g": lowerCAmelCase__ = value elif weight_type == "weight_v": lowerCAmelCase__ = value elif weight_type == "bias": lowerCAmelCase__ = value else: lowerCAmelCase__ = value logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def A ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Union[str, Any] ) -> List[str]: '''simple docstring''' lowerCAmelCase__ = [] lowerCAmelCase__ = fairseq_model.state_dict() lowerCAmelCase__ = hf_model.unispeech.feature_extractor for name, value in fairseq_dict.items(): lowerCAmelCase__ = False if "conv_layers" in name: load_conv_layer( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , hf_model.config.feat_extract_norm == "group" , ) lowerCAmelCase__ = True else: for key, mapped_key in MAPPING.items(): lowerCAmelCase__ = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: lowerCAmelCase__ = True if "*" in mapped_key: lowerCAmelCase__ = name.split(UpperCamelCase_ )[0].split("." 
)[-2] lowerCAmelCase__ = mapped_key.replace("*" , UpperCamelCase_ ) if "weight_g" in name: lowerCAmelCase__ = "weight_g" elif "weight_v" in name: lowerCAmelCase__ = "weight_v" elif "bias" in name: lowerCAmelCase__ = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj lowerCAmelCase__ = "weight" else: lowerCAmelCase__ = None set_recursively(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) continue if not is_used: unused_weights.append(UpperCamelCase_ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def A ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Dict ) -> Any: '''simple docstring''' lowerCAmelCase__ = full_name.split("conv_layers." )[-1] lowerCAmelCase__ = name.split("." ) lowerCAmelCase__ = int(items[0] ) lowerCAmelCase__ = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) lowerCAmelCase__ = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) lowerCAmelCase__ = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) lowerCAmelCase__ = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) lowerCAmelCase__ = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(UpperCamelCase_ ) @torch.no_grad() def A ( UpperCamelCase_ : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : int=True ) -> Tuple: '''simple docstring''' if config_path is not None: lowerCAmelCase__ = UniSpeechConfig.from_pretrained(UpperCamelCase_ ) else: lowerCAmelCase__ = UniSpeechConfig() if is_finetuned: if dict_path: lowerCAmelCase__ = Dictionary.load_from_json(UpperCamelCase_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowerCAmelCase__ = target_dict.pad_index lowerCAmelCase__ = target_dict.bos_index lowerCAmelCase__ = target_dict.eos_index lowerCAmelCase__ = len(target_dict.symbols ) lowerCAmelCase__ = os.path.join(UpperCamelCase_ , "vocab.json" ) if not os.path.isdir(UpperCamelCase_ ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(UpperCamelCase_ ) ) return os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ ) lowerCAmelCase__ = target_dict.indices # fairseq has the <pad> and <s> switched lowerCAmelCase__ = 42 lowerCAmelCase__ = 43 with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as vocab_handle: json.dump(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase__ = WavaVecaPhonemeCTCTokenizer( UpperCamelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=UpperCamelCase_ , ) lowerCAmelCase__ = True if config.feat_extract_norm == "layer" else False lowerCAmelCase__ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , ) lowerCAmelCase__ = WavaVecaProcessor(feature_extractor=UpperCamelCase_ , tokenizer=UpperCamelCase_ ) processor.save_pretrained(UpperCamelCase_ ) lowerCAmelCase__ = UniSpeechForCTC(UpperCamelCase_ ) else: lowerCAmelCase__ = UniSpeechForPreTraining(UpperCamelCase_ ) if is_finetuned: lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path} ) else: lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) lowerCAmelCase__ = model[0].eval() recursively_load_weights(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) hf_unispeech.save_pretrained(UpperCamelCase_ ) if __name__ == "__main__": UpperCAmelCase__ : Optional[Any] = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to 
hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) UpperCAmelCase__ : List[str] = parser.parse_args() convert_unispeech_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
48
'''simple docstring'''
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple('CoinsDistribResult', 'moves excess')


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves + right_distrib_moves + abs(coins_to_left) + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
78
0
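# Usage sketch for the distribute-coins solver above; the names TreeNode and
# distribute_coins come from the demangled rewrite and are assumptions. A root with
# 0 coins, a left child with 3 and a right child with 0 needs 3 single-edge moves.
root = TreeNode(0, TreeNode(3), TreeNode(0))
print(distribute_coins(root))  # 3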
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase : Tuple = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Tuple = [ 'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'IBertForMaskedLM', 'IBertForMultipleChoice', 'IBertForQuestionAnswering', 'IBertForSequenceClassification', 'IBertForTokenClassification', 'IBertModel', 'IBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ibert import ( IBERT_PRETRAINED_MODEL_ARCHIVE_LIST, IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, IBertPreTrainedModel, ) else: import sys _lowercase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
49
'''simple docstring''' import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) SCREAMING_SNAKE_CASE_: int =logging.getLogger() def lowerCAmelCase_ ( ) -> Dict: '''simple docstring''' UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument("-f" ) UpperCAmelCase_ = parser.parse_args() return args.f def lowerCAmelCase_ ( snake_case_ : List[Any] ) -> str: '''simple docstring''' UpperCAmelCase_ = {} UpperCAmelCase_ = os.path.join(snake_case_ , "all_results.json" ) if os.path.exists(snake_case_ ): with open(snake_case_ , "r" ) as f: UpperCAmelCase_ = json.load(snake_case_ ) else: raise ValueError(f"""can't find {path}""" ) return results def lowerCAmelCase_ ( ) -> Dict: '''simple docstring''' UpperCAmelCase_ = torch.cuda.is_available() and torch_device == "cuda" return is_using_cuda and is_apex_available() SCREAMING_SNAKE_CASE_: Any =logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class __A ( UpperCamelCase__ ): @classmethod def _lowercase (cls : Any ): # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU UpperCAmelCase_ = tempfile.mkdtemp() UpperCAmelCase_ = os.path.join(cls.tmpdir , "default_config.yml" ) write_basic_config(save_location=cls.configPath ) UpperCAmelCase_ = ["accelerate", "launch", "--config_file", cls.configPath] @classmethod def _lowercase (cls : int ): shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --checkpointing_steps epoch --with_tracking """.split() if is_cuda_and_apex_available(): testargs.append("--fp16" ) run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "glue_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking """.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertLess(result["perplexity"] , 100 ) self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "clm_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertLess(result["perplexity"] , 42 ) self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "mlm_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : Optional[Any] ): # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu UpperCAmelCase_ = 7 if get_gpu_count() > 1 else 2 UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) self.assertLess(result["train_loss"] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "ner_no_trainer" ) ) ) @unittest.skip(reason="Fix me @muellerzr" ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : int ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. 
self.assertGreaterEqual(result["eval_f1"] , 28 ) self.assertGreaterEqual(result["eval_exact"] , 28 ) self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "qa_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : str ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertGreaterEqual(result["eval_accuracy"] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(__a , "swag_no_trainer" ) ) ) @slow @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : Optional[int] ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertGreaterEqual(result["eval_rouge1"] , 10 ) self.assertGreaterEqual(result["eval_rouge2"] , 2 ) self.assertGreaterEqual(result["eval_rougeL"] , 7 ) self.assertGreaterEqual(result["eval_rougeLsum"] , 7 ) self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "summarization_no_trainer" ) ) ) @slow @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : List[str] ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertGreaterEqual(result["eval_bleu"] , 30 ) self.assertTrue(os.path.exists(os.path.join(__a , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "translation_no_trainer" ) ) ) @slow def _lowercase (self : Dict ): UpperCAmelCase_ = logging.StreamHandler(sys.stdout ) logger.addHandler(__a ) UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 
--checkpointing_steps epoch """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _lowercase (self : Any ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = f""" {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 """.split() if is_cuda_and_apex_available(): testargs.append("--fp16" ) run_command(self._launch_args + testargs ) UpperCAmelCase_ = get_results(__a ) # The base model scores a 25% self.assertGreaterEqual(result["eval_accuracy"] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(__a , "step_1" ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , "image_classification_no_trainer" ) ) )
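The example-script tests above lean on unittest.mock.patch.dict to force Weights & Biases offline for the duration of each test; here is a minimal, runnable sketch of that environment-patching pattern (the test class and method names are hypothetical).

import os
import unittest
from unittest import mock


class EnvPatchExample(unittest.TestCase):
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_env_is_patched(self):
        # Inside the test the variable is set; it is restored automatically afterwards.
        self.assertEqual(os.environ["WANDB_MODE"], "offline")


if __name__ == "__main__":
    unittest.main()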
78
0
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data: dict) -> tuple:
    # Split a scikit-learn bunch into (features, targets).
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    data, target = data_handling(fetch_california_housing())
    x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
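Assuming xgboost and NumPy are installed, a quick standalone sketch of fitting the same regressor on synthetic data (shapes and coefficients are illustrative only):

import numpy as np
from xgboost import XGBRegressor

rng = np.random.default_rng(0)
x = rng.normal(size=(200, 3))
# Linear target with a little noise, purely for demonstration.
y = x @ np.array([1.0, -2.0, 0.5]) + rng.normal(scale=0.1, size=200)

model = XGBRegressor(verbosity=0, random_state=42)
model.fit(x, y)
print(model.predict(x[:5]))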
50
'''simple docstring''' import builtins import sys from ...utils.imports import _is_package_available from . import cursor, input from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor from .keymap import KEYMAP SCREAMING_SNAKE_CASE_: Any =False try: SCREAMING_SNAKE_CASE_: Optional[Any] =_is_package_available('google.colab') except ModuleNotFoundError: pass @input.register class __A : def __init__(self : int , __a : str = None , __a : list = [] ): UpperCAmelCase_ = 0 UpperCAmelCase_ = choices UpperCAmelCase_ = prompt if sys.platform == "win32": UpperCAmelCase_ = "*" else: UpperCAmelCase_ = "➔ " def _lowercase (self : Union[str, Any] , __a : Optional[int] , __a : str = "" ): if sys.platform != "win32": writeColor(self.choices[index] , 32 , __a ) else: forceWrite(self.choices[index] , __a ) def _lowercase (self : Any , __a : int ): if index == self.position: forceWrite(f""" {self.arrow_char} """ ) self.write_choice(__a ) else: forceWrite(f""" {self.choices[index]}""" ) reset_cursor() def _lowercase (self : Optional[Any] , __a : Direction , __a : int = 1 ): UpperCAmelCase_ = self.position if direction == Direction.DOWN: if self.position + 1 >= len(self.choices ): return self.position += num_spaces else: if self.position - 1 < 0: return self.position -= num_spaces clear_line() self.print_choice(__a ) move_cursor(__a , direction.name ) self.print_choice(self.position ) @input.mark(KEYMAP["up"] ) def _lowercase (self : Dict ): self.move_direction(Direction.UP ) @input.mark(KEYMAP["down"] ) def _lowercase (self : Any ): self.move_direction(Direction.DOWN ) @input.mark(KEYMAP["newline"] ) def _lowercase (self : Optional[Any] ): move_cursor(len(self.choices ) - self.position , "DOWN" ) return self.position @input.mark(KEYMAP["interrupt"] ) def _lowercase (self : str ): move_cursor(len(self.choices ) - self.position , "DOWN" ) raise KeyboardInterrupt @input.mark_multiple(*[KEYMAP[str(__a )] for number in range(10 )] ) def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ = int(chr(self.current_selection ) ) UpperCAmelCase_ = index - self.position if index == self.position: return if index < len(self.choices ): if self.position > index: self.move_direction(Direction.UP , -movement ) elif self.position < index: self.move_direction(Direction.DOWN , __a ) else: return else: return def _lowercase (self : Optional[Any] , __a : int = 0 ): if self.prompt: linebreak() forceWrite(self.prompt , "\n" ) if in_colab: forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" ) else: forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" ) UpperCAmelCase_ = default_choice for i in range(len(self.choices ) ): self.print_choice(__a ) forceWrite("\n" ) move_cursor(len(self.choices ) - self.position , "UP" ) with cursor.hide(): while True: if in_colab: try: UpperCAmelCase_ = int(builtins.input() ) except ValueError: UpperCAmelCase_ = default_choice else: UpperCAmelCase_ = self.handle_input() if choice is not None: reset_cursor() for _ in range(len(self.choices ) + 1 ): move_cursor(1 , "UP" ) clear_line() self.write_choice(__a , "\n" ) return choice
78
0
'''simple docstring''' import gzip import hashlib import json import multiprocessing import os import re import shutil import time from pathlib import Path import numpy as np from arguments import PreprocessingArguments from datasets import load_dataset from minhash_deduplication import deduplicate_dataset from transformers import AutoTokenizer, HfArgumentParser a__ : Tuple = re.compile(R'\s+') def __snake_case ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Dict: """simple docstring""" return {"hash": hashlib.mda(re.sub(SCREAMING_SNAKE_CASE_ , '''''' , example['''content'''] ).encode('''utf-8''' ) ).hexdigest()} def __snake_case ( SCREAMING_SNAKE_CASE_ : Tuple ) -> str: """simple docstring""" UpperCAmelCase = [len(SCREAMING_SNAKE_CASE_ ) for line in example['''content'''].splitlines()] return {"line_mean": np.mean(SCREAMING_SNAKE_CASE_ ), "line_max": max(SCREAMING_SNAKE_CASE_ )} def __snake_case ( SCREAMING_SNAKE_CASE_ : Any ) -> List[str]: """simple docstring""" UpperCAmelCase = np.mean([c.isalnum() for c in example['''content''']] ) return {"alpha_frac": alpha_frac} def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any ) -> Any: """simple docstring""" if example["hash"] in uniques: uniques.remove(example['''hash'''] ) return True else: return False def __snake_case ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any=5 ) -> Optional[int]: """simple docstring""" UpperCAmelCase = ['''auto-generated''', '''autogenerated''', '''automatically generated'''] UpperCAmelCase = example['''content'''].splitlines() for _, line in zip(range(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ): for keyword in keywords: if keyword in line.lower(): return {"autogenerated": True} else: return {"autogenerated": False} def __snake_case ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : str=5 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.05 ) -> List[Any]: """simple docstring""" UpperCAmelCase = ['''unit tests''', '''test file''', '''configuration file'''] UpperCAmelCase = example['''content'''].splitlines() UpperCAmelCase = 0 UpperCAmelCase = 0 # first test for _, line in zip(range(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ): for keyword in keywords: if keyword in line.lower(): return {"config_or_test": True} # second test UpperCAmelCase = example['''content'''].count('''\n''' ) UpperCAmelCase = int(coeff * nlines ) for line in lines: count_config += line.lower().count('''config''' ) count_test += line.lower().count('''test''' ) if count_config > threshold or count_test > threshold: return {"config_or_test": True} return {"config_or_test": False} def __snake_case ( SCREAMING_SNAKE_CASE_ : str ) -> Optional[Any]: """simple docstring""" UpperCAmelCase = ['''def ''', '''class ''', '''for ''', '''while '''] UpperCAmelCase = example['''content'''].splitlines() for line in lines: for keyword in keywords: if keyword in line.lower(): return {"has_no_keywords": False} return {"has_no_keywords": True} def __snake_case ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any=4 ) -> int: """simple docstring""" UpperCAmelCase = example['''content'''].splitlines() UpperCAmelCase = 0 for line in lines: counter += line.lower().count('''=''' ) if counter > minimum: return {"has_few_assignments": False} return {"has_few_assignments": True} def __snake_case ( SCREAMING_SNAKE_CASE_ : str ) -> Dict: """simple docstring""" UpperCAmelCase = tokenizer(example['''content'''] , truncation=SCREAMING_SNAKE_CASE_ )['''input_ids'''] UpperCAmelCase = 
len(example['''content'''] ) / len(SCREAMING_SNAKE_CASE_ ) return {"ratio": ratio} def __snake_case ( SCREAMING_SNAKE_CASE_ : str ) -> int: """simple docstring""" UpperCAmelCase = {} results.update(get_hash(SCREAMING_SNAKE_CASE_ ) ) results.update(line_stats(SCREAMING_SNAKE_CASE_ ) ) results.update(alpha_stats(SCREAMING_SNAKE_CASE_ ) ) results.update(char_token_ratio(SCREAMING_SNAKE_CASE_ ) ) results.update(is_autogenerated(SCREAMING_SNAKE_CASE_ ) ) results.update(is_config_or_test(SCREAMING_SNAKE_CASE_ ) ) results.update(has_no_keywords(SCREAMING_SNAKE_CASE_ ) ) results.update(has_few_assignments(SCREAMING_SNAKE_CASE_ ) ) return results def __snake_case ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Union[str, Any]: """simple docstring""" if not check_uniques(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): return False elif example["autogenerated"]: return False elif example["line_max"] > args.line_max: return False elif example["line_mean"] > args.line_mean: return False elif example["alpha_frac"] < args.alpha_frac: return False elif example["ratio"] < args.min_token_ratio: return False elif example["config_or_test"] and np.random.rand() <= args.filter_proba: return False elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba: return False elif example["has_few_assignments"]: return False else: return True def __snake_case ( SCREAMING_SNAKE_CASE_ : List[Any] ) -> str: """simple docstring""" with open(SCREAMING_SNAKE_CASE_ , '''rb''' ) as f_in: with gzip.open(str(SCREAMING_SNAKE_CASE_ ) + '''.gz''' , '''wb''' , compresslevel=6 ) as f_out: shutil.copyfileobj(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) os.unlink(SCREAMING_SNAKE_CASE_ ) # Settings a__ : List[Any] = HfArgumentParser(PreprocessingArguments) a__ : Tuple = parser.parse_args() if args.num_workers is None: a__ : Optional[Any] = multiprocessing.cpu_count() a__ : Dict = AutoTokenizer.from_pretrained(args.tokenizer_dir) # Load dataset a__ : List[str] = time.time() a__ : Any = load_dataset(args.dataset_name, split='train') print(F"""Time to load dataset: {time.time()-t_start:.2f}""") # Run preprocessing a__ : Any = time.time() a__ : Union[str, Any] = ds.map(preprocess, num_proc=args.num_workers) print(F"""Time to preprocess dataset: {time.time()-t_start:.2f}""") # Deduplicate hashes a__ : Tuple = set(ds.unique('hash')) a__ : Any = len(uniques) / len(ds) print(F"""Fraction of duplicates: {1-frac:.2%}""") # Deduplicate data and apply heuristics a__ : str = time.time() a__ : Optional[Any] = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args}) print(F"""Time to filter dataset: {time.time()-t_start:.2f}""") print(F"""Size of filtered dataset: {len(ds_filter)}""") # Deduplicate with minhash and jaccard similarity if args.near_deduplication: a__ : List[str] = time.time() a__ , a__ : str = deduplicate_dataset(ds_filter, args.jaccard_threshold) print(F"""Time to deduplicate dataset: {time.time()-t_start:.2f}""") print(F"""Size of deduplicate dataset: {len(ds_filter)}""") # Save data in batches of samples_per_file a__ : Tuple = Path(args.output_dir) output_dir.mkdir(exist_ok=True) # save duplicate_clusters in the output_dir as artifacts # not sure it is the right place the save it if args.near_deduplication: with open(output_dir / 'duplicate_clusters.json', 'w') as f: json.dump(duplicate_clusters, f) a__ : int = output_dir / 'data' data_dir.mkdir(exist_ok=True) a__ : Tuple = time.time() for file_number, index in enumerate(range(0, len(ds_filter), 
args.samples_per_file)): a__ : Any = str(data_dir / F"""file-{file_number+1:012}.json""") a__ : Dict = min(len(ds_filter), index + args.samples_per_file) ds_filter.select(list(range(index, end_index))).to_json(file_path) compress_file(file_path) print(F"""Time to save dataset: {time.time()-t_start:.2f}""")
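The exact-deduplication step in the pipeline above boils down to hashing whitespace-stripped content and keeping only the first occurrence; a self-contained sketch (the sample strings are illustrative, and MD5 is assumed to be the intended hash):

import hashlib
import re

WHITESPACE = re.compile(r"\s+")


def content_hash(text: str) -> str:
    # Hash the text with all whitespace removed, so formatting changes collapse.
    return hashlib.md5(WHITESPACE.sub("", text).encode("utf-8")).hexdigest()


samples = ["def f():\n    return 1", "def f():  \n\treturn 1", "def g(): pass"]
seen: set = set()
unique = []
for s in samples:
    h = content_hash(s)
    if h not in seen:
        seen.add(h)
        unique.append(s)
print(len(unique))  # 2 -- the first two samples hash identically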
51
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_torch_available,
    is_vision_available,
)

_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
78
0
"""simple docstring""" from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def __A ( a_ :int) -> bool: __a : int = int(number**0.5) return number == sq * sq def __A ( a_ :int , a_ :int , a_ :int , a_ :int , a_ :int , a_ :int) -> tuple[int, int]: __a : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den __a : int = x_den * y_den * z_den __a : int = gcd(a_ , a_) top //= hcf bottom //= hcf return top, bottom def __A ( a_ :int = 35) -> int: __a : set = set() __a : int __a : Fraction = Fraction(0) __a : tuple[int, int] for x_num in range(1 , order + 1): for x_den in range(x_num + 1 , order + 1): for y_num in range(1 , order + 1): for y_den in range(y_num + 1 , order + 1): # n=1 __a : str = x_num * y_den + x_den * y_num __a : Any = x_den * y_den __a : Optional[Any] = gcd(a_ , a_) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __a : Any = add_three( a_ , a_ , a_ , a_ , a_ , a_) unique_s.add(a_) # n=2 __a : List[str] = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) __a : str = x_den * x_den * y_den * y_den if is_sq(a_) and is_sq(a_): __a : Any = int(sqrt(a_)) __a : Union[str, Any] = int(sqrt(a_)) __a : Optional[Any] = gcd(a_ , a_) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __a : List[str] = add_three( a_ , a_ , a_ , a_ , a_ , a_) unique_s.add(a_) # n=-1 __a : Optional[Any] = x_num * y_num __a : Optional[Any] = x_den * y_num + x_num * y_den __a : int = gcd(a_ , a_) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __a : List[Any] = add_three( a_ , a_ , a_ , a_ , a_ , a_) unique_s.add(a_) # n=2 __a : List[Any] = x_num * x_num * y_num * y_num __a : Union[str, Any] = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(a_) and is_sq(a_): __a : str = int(sqrt(a_)) __a : Tuple = int(sqrt(a_)) __a : Union[str, Any] = gcd(a_ , a_) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __a : int = add_three( a_ , a_ , a_ , a_ , a_ , a_) unique_s.add(a_) for num, den in unique_s: total += Fraction(a_ , a_) return total.denominator + total.numerator if __name__ == "__main__": print(F'{solution() = }')
52
'''simple docstring''' import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed SCREAMING_SNAKE_CASE_: Any ={ 'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), 'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), 'bert': (BertConfig, BertForMaskedLM, BertTokenizer), 'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def lowerCAmelCase_ ( snake_case_ : Any ) -> str: '''simple docstring''' assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' if args.student_type == "roberta": UpperCAmelCase_ = False elif args.student_type == "gpt2": UpperCAmelCase_ = False def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : List[Any] ) -> Tuple: '''simple docstring''' if args.student_type == "roberta": UpperCAmelCase_ = False def lowerCAmelCase_ ( ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ = argparse.ArgumentParser(description="Training" ) parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." ) parser.add_argument( "--dump_path" , type=snake_case_ , required=snake_case_ , help="The output directory (log, checkpoints, parameters, etc.)" ) parser.add_argument( "--data_file" , type=snake_case_ , required=snake_case_ , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , ) parser.add_argument( "--student_type" , type=snake_case_ , choices=["distilbert", "roberta", "gpt2"] , required=snake_case_ , help="The student type (DistilBERT, RoBERTa)." , ) parser.add_argument("--student_config" , type=snake_case_ , required=snake_case_ , help="Path to the student configuration." ) parser.add_argument( "--student_pretrained_weights" , default=snake_case_ , type=snake_case_ , help="Load student initialization checkpoint." ) parser.add_argument( "--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=snake_case_ , help="Teacher type (BERT, RoBERTa)." ) parser.add_argument("--teacher_name" , type=snake_case_ , required=snake_case_ , help="The teacher model." 
) parser.add_argument("--temperature" , default=2.0 , type=snake_case_ , help="Temperature for the softmax temperature." ) parser.add_argument( "--alpha_ce" , default=0.5 , type=snake_case_ , help="Linear weight for the distillation loss. Must be >=0." ) parser.add_argument( "--alpha_mlm" , default=0.0 , type=snake_case_ , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , ) parser.add_argument("--alpha_clm" , default=0.5 , type=snake_case_ , help="Linear weight for the CLM loss. Must be >=0." ) parser.add_argument("--alpha_mse" , default=0.0 , type=snake_case_ , help="Linear weight of the MSE loss. Must be >=0." ) parser.add_argument( "--alpha_cos" , default=0.0 , type=snake_case_ , help="Linear weight of the cosine embedding loss. Must be >=0." ) parser.add_argument( "--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." ) parser.add_argument( "--mlm_mask_prop" , default=0.15 , type=snake_case_ , help="Proportion of tokens for which we need to make a prediction." , ) parser.add_argument("--word_mask" , default=0.8 , type=snake_case_ , help="Proportion of tokens to mask out." ) parser.add_argument("--word_keep" , default=0.1 , type=snake_case_ , help="Proportion of tokens to keep." ) parser.add_argument("--word_rand" , default=0.1 , type=snake_case_ , help="Proportion of tokens to randomly replace." ) parser.add_argument( "--mlm_smoothing" , default=0.7 , type=snake_case_ , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , ) parser.add_argument("--token_counts" , type=snake_case_ , help="The token counts in the data_file for MLM." ) parser.add_argument( "--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , ) parser.add_argument( "--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , ) parser.add_argument( "--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , ) parser.add_argument("--n_epoch" , type=snake_case_ , default=3 , help="Number of pass on the whole dataset." ) parser.add_argument("--batch_size" , type=snake_case_ , default=5 , help="Batch size (for each process)." ) parser.add_argument( "--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , ) parser.add_argument( "--gradient_accumulation_steps" , type=snake_case_ , default=50 , help="Gradient accumulation for larger training batches." , ) parser.add_argument("--warmup_prop" , default=0.05 , type=snake_case_ , help="Linear warmup proportion." ) parser.add_argument("--weight_decay" , default=0.0 , type=snake_case_ , help="Weight decay if we apply some." ) parser.add_argument("--learning_rate" , default=5E-4 , type=snake_case_ , help="The initial learning rate for Adam." ) parser.add_argument("--adam_epsilon" , default=1E-6 , type=snake_case_ , help="Epsilon for Adam optimizer." ) parser.add_argument("--max_grad_norm" , default=5.0 , type=snake_case_ , help="Max gradient norm." ) parser.add_argument("--initializer_range" , default=0.02 , type=snake_case_ , help="Random initialization range." 
) parser.add_argument( "--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , ) parser.add_argument( "--fp16_opt_level" , type=snake_case_ , default="O1" , help=( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html" ) , ) parser.add_argument("--n_gpu" , type=snake_case_ , default=1 , help="Number of GPUs in the node." ) parser.add_argument("--local_rank" , type=snake_case_ , default=-1 , help="Distributed training - Local rank" ) parser.add_argument("--seed" , type=snake_case_ , default=56 , help="Random seed" ) parser.add_argument("--log_interval" , type=snake_case_ , default=5_00 , help="Tensorboard logging interval." ) parser.add_argument("--checkpoint_interval" , type=snake_case_ , default=40_00 , help="Checkpoint interval." ) UpperCAmelCase_ = parser.parse_args() sanity_checks(snake_case_ ) # ARGS # init_gpu_params(snake_case_ ) set_seed(snake_case_ ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite""" " itUse `--force` if you want to overwrite it" ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" ) # SAVE PARAMS # logger.info(f"""Param: {args}""" ) with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f: json.dump(vars(snake_case_ ) , snake_case_ , indent=4 ) git_log(args.dump_path ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = MODEL_CLASSES[args.student_type] UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = MODEL_CLASSES[args.teacher_type] # TOKENIZER # UpperCAmelCase_ = teacher_tokenizer_class.from_pretrained(args.teacher_name ) UpperCAmelCase_ = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): UpperCAmelCase_ = tokenizer.all_special_tokens.index(snake_case_ ) UpperCAmelCase_ = tokenizer.all_special_ids[idx] logger.info(f"""Special tokens {special_tok_ids}""" ) UpperCAmelCase_ = special_tok_ids UpperCAmelCase_ = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f"""Loading data from {args.data_file}""" ) with open(args.data_file , "rb" ) as fp: UpperCAmelCase_ = pickle.load(snake_case_ ) if args.mlm: logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" ) with open(args.token_counts , "rb" ) as fp: UpperCAmelCase_ = pickle.load(snake_case_ ) UpperCAmelCase_ = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): UpperCAmelCase_ = 0.0 # do not predict special tokens UpperCAmelCase_ = torch.from_numpy(snake_case_ ) else: UpperCAmelCase_ = None UpperCAmelCase_ = LmSeqsDataset(params=snake_case_ , data=snake_case_ ) logger.info("Data loader created." ) # STUDENT # logger.info(f"""Loading student config from {args.student_config}""" ) UpperCAmelCase_ = student_config_class.from_pretrained(args.student_config ) UpperCAmelCase_ = True if args.student_pretrained_weights is not None: logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" ) UpperCAmelCase_ = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case_ ) else: UpperCAmelCase_ = student_model_class(snake_case_ ) if args.n_gpu > 0: student.to(f"""cuda:{args.local_rank}""" ) logger.info("Student loaded." 
) # TEACHER # UpperCAmelCase_ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case_ ) if args.n_gpu > 0: teacher.to(f"""cuda:{args.local_rank}""" ) logger.info(f"""Teacher loaded from {args.teacher_name}.""" ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(snake_case_ , snake_case_ ) if args.freeze_token_type_embds: freeze_token_type_embeddings(snake_case_ , snake_case_ ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() UpperCAmelCase_ = Distiller( params=snake_case_ , dataset=snake_case_ , token_probs=snake_case_ , student=snake_case_ , teacher=snake_case_ ) distiller.train() logger.info("Let's go get some drinks." ) if __name__ == "__main__": main()
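The distillation script above dispatches on --student_type/--teacher_type through a table of (config, model, tokenizer) classes; a minimal sketch of that dispatch pattern, with stand-in classes so it runs without transformers installed:

class DummyConfig: ...
class DummyModel: ...
class DummyTokenizer: ...


MODEL_CLASSES = {
    "distilbert": (DummyConfig, DummyModel, DummyTokenizer),
    "gpt2": (DummyConfig, DummyModel, DummyTokenizer),
}

student_type = "distilbert"  # would come from argparse in the real script
config_cls, model_cls, tokenizer_cls = MODEL_CLASSES[student_type]
print(config_cls.__name__, model_cls.__name__, tokenizer_cls.__name__)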
78
0
from __future__ import annotations


def print_distance(distance: list[float], src: int) -> None:
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    # One extra relaxation pass: any further improvement means a negative cycle.
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    if check_negative_cycle(graph, distance, edge_count):
        raise Exception("Negative cycle found")
    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())
    graph: list[dict[str, int]] = [{} for _ in range(E)]
    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x) for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}
    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, source)
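A quick worked example, run after the definitions above, using a three-vertex graph (expected output: [0.0, 2.0, 3.0]):

edges = [
    {"src": 0, "dst": 1, "weight": 2},
    {"src": 1, "dst": 2, "weight": 1},
    {"src": 0, "dst": 2, "weight": 5},
]
distances = bellman_ford(edges, 3, len(edges), 0)
print(distances)  # vertex 2 is reached via 0 -> 1 -> 2 at cost 3.0, not directly at cost 5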
53
'''simple docstring''' import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class __A ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): a__ : int = AutoencoderKL a__ : Optional[Any] = """sample""" a__ : Union[str, Any] = 1e-2 @property def _lowercase (self : Optional[int] ): UpperCAmelCase_ = 4 UpperCAmelCase_ = 3 UpperCAmelCase_ = (32, 32) UpperCAmelCase_ = floats_tensor((batch_size, num_channels) + sizes ).to(__a ) return {"sample": image} @property def _lowercase (self : Any ): return (3, 32, 32) @property def _lowercase (self : Dict ): return (3, 32, 32) def _lowercase (self : int ): UpperCAmelCase_ = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } UpperCAmelCase_ = self.dummy_input return init_dict, inputs_dict def _lowercase (self : int ): pass def _lowercase (self : int ): pass @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" ) def _lowercase (self : List[Any] ): # enable deterministic behavior for gradient checkpointing UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_init_args_and_inputs_for_common() UpperCAmelCase_ = self.model_class(**__a ) model.to(__a ) assert not model.is_gradient_checkpointing and model.training UpperCAmelCase_ = model(**__a ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() UpperCAmelCase_ = torch.randn_like(__a ) UpperCAmelCase_ = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing UpperCAmelCase_ = self.model_class(**__a ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(__a ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training UpperCAmelCase_ = model_a(**__a ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() UpperCAmelCase_ = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1E-5 ) UpperCAmelCase_ = dict(model.named_parameters() ) UpperCAmelCase_ = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) ) def _lowercase (self : Any ): UpperCAmelCase_ , UpperCAmelCase_ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=__a ) self.assertIsNotNone(__a ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(__a ) UpperCAmelCase_ = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def _lowercase (self : List[str] ): UpperCAmelCase_ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" ) UpperCAmelCase_ = model.to(__a ) model.eval() if torch_device == "mps": UpperCAmelCase_ = torch.manual_seed(0 ) else: UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 ) UpperCAmelCase_ = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) UpperCAmelCase_ = image.to(__a ) with torch.no_grad(): UpperCAmelCase_ = model(__a , sample_posterior=__a , generator=__a ).sample UpperCAmelCase_ = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": UpperCAmelCase_ = torch.tensor( [ -4.0078E-01, -3.8323E-04, -1.2681E-01, -1.1462E-01, 2.0095E-01, 1.0893E-01, -8.8247E-02, -3.0361E-01, -9.8644E-03, ] ) elif torch_device == "cpu": UpperCAmelCase_ = torch.tensor( [-0.13_52, 0.08_78, 0.04_19, -0.08_18, -0.10_69, 0.06_88, -0.14_58, -0.44_46, -0.00_26] ) else: UpperCAmelCase_ = torch.tensor( [-0.24_21, 0.46_42, 0.25_07, -0.04_38, 0.06_82, 0.31_60, -0.20_18, -0.07_27, 0.24_85] ) self.assertTrue(torch_all_close(__a , __a , rtol=1E-2 ) ) @slow class __A ( unittest.TestCase ): def _lowercase (self : Dict , __a : Dict , __a : int ): return f"""gaussian_noise_s={seed}_shape={"_".join([str(__a ) for s in shape] )}.npy""" def _lowercase (self : str ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase (self : Optional[Any] , __a : Optional[Any]=0 , __a : str=(4, 3, 512, 512) , __a : List[str]=False ): UpperCAmelCase_ = torch.floataa if fpaa else torch.floataa UpperCAmelCase_ = torch.from_numpy(load_hf_numpy(self.get_file_format(__a , __a ) ) ).to(__a ).to(__a ) return image def _lowercase (self : List[Any] , __a : Union[str, Any]="CompVis/stable-diffusion-v1-4" , __a : List[Any]=False ): UpperCAmelCase_ = "fp16" if fpaa else None UpperCAmelCase_ = torch.floataa if fpaa else torch.floataa UpperCAmelCase_ = AutoencoderKL.from_pretrained( __a , subfolder="vae" , torch_dtype=__a , revision=__a , ) model.to(__a ).eval() return model def _lowercase (self : List[Any] , __a : List[Any]=0 ): if torch_device == "mps": return torch.manual_seed(__a ) return torch.Generator(device=__a ).manual_seed(__a ) @parameterized.expand( [ # fmt: off [33, [-0.16_03, 0.98_78, -0.04_95, -0.07_90, -0.27_09, 0.83_75, -0.20_60, -0.08_24], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]], [47, [-0.23_76, 0.11_68, 0.13_32, -0.48_40, -0.25_08, 
-0.07_91, -0.04_93, -0.40_89], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]], # fmt: on ] ) def _lowercase (self : List[Any] , __a : Dict , __a : Optional[int] , __a : List[str] ): UpperCAmelCase_ = self.get_sd_vae_model() UpperCAmelCase_ = self.get_sd_image(__a ) UpperCAmelCase_ = self.get_generator(__a ) with torch.no_grad(): UpperCAmelCase_ = model(__a , generator=__a , sample_posterior=__a ).sample assert sample.shape == image.shape UpperCAmelCase_ = sample[-1, -2:, -2:, :2].flatten().float().cpu() UpperCAmelCase_ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(__a , __a , atol=3E-3 ) @parameterized.expand( [ # fmt: off [33, [-0.05_13, 0.02_89, 1.37_99, 0.21_66, -0.25_73, -0.08_71, 0.51_03, -0.09_99]], [47, [-0.41_28, -0.13_20, -0.37_04, 0.19_65, -0.41_16, -0.23_32, -0.33_40, 0.22_47]], # fmt: on ] ) @require_torch_gpu def _lowercase (self : Dict , __a : Optional[int] , __a : int ): UpperCAmelCase_ = self.get_sd_vae_model(fpaa=__a ) UpperCAmelCase_ = self.get_sd_image(__a , fpaa=__a ) UpperCAmelCase_ = self.get_generator(__a ) with torch.no_grad(): UpperCAmelCase_ = model(__a , generator=__a , sample_posterior=__a ).sample assert sample.shape == image.shape UpperCAmelCase_ = sample[-1, -2:, :2, -2:].flatten().float().cpu() UpperCAmelCase_ = torch.tensor(__a ) assert torch_all_close(__a , __a , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.16_09, 0.98_66, -0.04_87, -0.07_77, -0.27_16, 0.83_68, -0.20_55, -0.08_14], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]], [47, [-0.23_77, 0.11_47, 0.13_33, -0.48_41, -0.25_06, -0.08_05, -0.04_91, -0.40_85], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]], # fmt: on ] ) def _lowercase (self : str , __a : int , __a : Union[str, Any] , __a : List[Any] ): UpperCAmelCase_ = self.get_sd_vae_model() UpperCAmelCase_ = self.get_sd_image(__a ) with torch.no_grad(): UpperCAmelCase_ = model(__a ).sample assert sample.shape == image.shape UpperCAmelCase_ = sample[-1, -2:, -2:, :2].flatten().float().cpu() UpperCAmelCase_ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(__a , __a , atol=3E-3 ) @parameterized.expand( [ # fmt: off [13, [-0.20_51, -0.18_03, -0.23_11, -0.21_14, -0.32_92, -0.35_74, -0.29_53, -0.33_23]], [37, [-0.26_32, -0.26_25, -0.21_99, -0.27_41, -0.45_39, -0.49_90, -0.37_20, -0.49_25]], # fmt: on ] ) @require_torch_gpu def _lowercase (self : int , __a : int , __a : int ): UpperCAmelCase_ = self.get_sd_vae_model() UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) ) with torch.no_grad(): UpperCAmelCase_ = model.decode(__a ).sample assert list(sample.shape ) == [3, 3, 512, 512] UpperCAmelCase_ = sample[-1, -2:, :2, -2:].flatten().cpu() UpperCAmelCase_ = torch.tensor(__a ) assert torch_all_close(__a , __a , atol=1E-3 ) @parameterized.expand( [ # fmt: off [27, [-0.03_69, 0.02_07, -0.07_76, -0.06_82, -0.17_47, -0.19_30, -0.14_65, -0.20_39]], [16, [-0.16_28, -0.21_34, -0.27_47, -0.26_42, -0.37_74, -0.44_04, -0.36_87, -0.42_77]], # fmt: on ] ) @require_torch_gpu def _lowercase (self : Union[str, Any] , __a : List[str] , __a : Optional[Any] ): UpperCAmelCase_ = self.get_sd_vae_model(fpaa=__a ) UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) , fpaa=__a ) with torch.no_grad(): UpperCAmelCase_ = model.decode(__a ).sample assert list(sample.shape ) == [3, 3, 512, 512] UpperCAmelCase_ = sample[-1, -2:, :2, 
-2:].flatten().float().cpu() UpperCAmelCase_ = torch.tensor(__a ) assert torch_all_close(__a , __a , atol=5E-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def _lowercase (self : List[str] , __a : int ): UpperCAmelCase_ = self.get_sd_vae_model(fpaa=__a ) UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) , fpaa=__a ) with torch.no_grad(): UpperCAmelCase_ = model.decode(__a ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): UpperCAmelCase_ = model.decode(__a ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(__a , __a , atol=1E-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def _lowercase (self : Union[str, Any] , __a : Dict ): UpperCAmelCase_ = self.get_sd_vae_model() UpperCAmelCase_ = self.get_sd_image(__a , shape=(3, 4, 64, 64) ) with torch.no_grad(): UpperCAmelCase_ = model.decode(__a ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): UpperCAmelCase_ = model.decode(__a ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(__a , __a , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.30_01, 0.09_18, -2.69_84, -3.97_20, -3.20_99, -5.03_53, 1.73_38, -0.20_65, 3.42_67]], [47, [-1.50_30, -4.38_71, -6.03_55, -9.11_57, -1.66_61, -2.78_53, 2.16_07, -5.08_23, 2.56_33]], # fmt: on ] ) def _lowercase (self : Tuple , __a : List[Any] , __a : List[Any] ): UpperCAmelCase_ = self.get_sd_vae_model() UpperCAmelCase_ = self.get_sd_image(__a ) UpperCAmelCase_ = self.get_generator(__a ) with torch.no_grad(): UpperCAmelCase_ = model.encode(__a ).latent_dist UpperCAmelCase_ = dist.sample(generator=__a ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] UpperCAmelCase_ = sample[0, -1, -3:, -3:].flatten().cpu() UpperCAmelCase_ = torch.tensor(__a ) UpperCAmelCase_ = 3E-3 if torch_device != "mps" else 1E-2 assert torch_all_close(__a , __a , atol=__a )
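The gradient-checkpointing test above asserts that checkpointed and plain backward passes agree; a compact sketch of the same parity check on a single layer, assuming a recent PyTorch with the use_reentrant keyword:

import torch
from torch.utils.checkpoint import checkpoint

torch.manual_seed(0)
layer = torch.nn.Linear(4, 4)
x = torch.randn(2, 4)

# Plain forward/backward.
layer.zero_grad()
layer(x).sum().backward()
grad_plain = layer.weight.grad.clone()

# Same computation re-run under activation checkpointing.
layer.zero_grad()
checkpoint(layer, x, use_reentrant=False).sum().backward()
grad_ckpt = layer.weight.grad.clone()

print(torch.allclose(grad_plain, grad_ckpt, atol=1e-6))  # True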
78
0
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Batch/choice/sequence axes stay dynamic for ONNX export.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
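Assuming transformers is installed, a quick usage sketch of the config class above (the reduced sizes are arbitrary and chosen only to keep the object small):

from transformers import RobertaConfig

config = RobertaConfig(num_hidden_layers=2, hidden_size=128, num_attention_heads=4, intermediate_size=256)
print(config.model_type, config.num_hidden_layers)  # roberta 2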
54
import logging

from transformers import PretrainedConfig


logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": (
        "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json"
    ),
}


class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
78
0
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import GLPNImageProcessor class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self : Any ,A : Optional[int] ,A : Optional[int]=7 ,A : Optional[Any]=3 ,A : List[str]=18 ,A : Any=30 ,A : Tuple=4_00 ,A : Union[str, Any]=True ,A : Optional[Any]=32 ,A : Union[str, Any]=True ,): __A = parent __A = batch_size __A = num_channels __A = image_size __A = min_resolution __A = max_resolution __A = do_resize __A = size_divisor __A = do_rescale def UpperCamelCase_ ( self : Union[str, Any] ): return { "do_resize": self.do_resize, "size_divisor": self.size_divisor, "do_rescale": self.do_rescale, } @require_torch @require_vision class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' snake_case_ = GLPNImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self : int ): __A = GLPNImageProcessingTester(self ) @property def UpperCamelCase_ ( self : Optional[Any] ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self : Any ): __A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A ,"do_resize" ) ) self.assertTrue(hasattr(A ,"size_divisor" ) ) self.assertTrue(hasattr(A ,"resample" ) ) self.assertTrue(hasattr(A ,"do_rescale" ) ) def UpperCamelCase_ ( self : str ): pass def UpperCamelCase_ ( self : Dict ): # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ) for image in image_inputs: self.assertIsInstance(A ,Image.Image ) # Test not batched input (GLPNImageProcessor doesn't support batching) __A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def UpperCamelCase_ ( self : Optional[Any] ): # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,numpify=A ) for image in image_inputs: self.assertIsInstance(A ,np.ndarray ) # Test not batched input (GLPNImageProcessor doesn't support batching) __A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def UpperCamelCase_ ( self : int ): # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A ) for image in image_inputs: self.assertIsInstance(A ,torch.Tensor ) # Test not batched input (GLPNImageProcessor doesn't support batching) __A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) 
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
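The assertions above check that GLPN keeps each output side a multiple of size_divisor; the resizing rule reduces to rounding each dimension down, as in this standalone sketch (the helper name is hypothetical):

def round_down_to_multiple(height: int, width: int, size_divisor: int = 32) -> tuple:
    # GLPN-style resizing keeps every side divisible by `size_divisor`.
    return (height // size_divisor) * size_divisor, (width // size_divisor) * size_divisor


print(round_down_to_multiple(417, 639))  # (416, 608)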
55
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE_: Tuple =logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> int: '''simple docstring''' UpperCAmelCase_ = "huggingface/label-files" UpperCAmelCase_ = "imagenet-1k-id2label.json" UpperCAmelCase_ = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="dataset" ) , "r" ) ) UpperCAmelCase_ = {int(snake_case_ ): v for k, v in idalabel.items()} UpperCAmelCase_ = {v: k for k, v in idalabel.items()} UpperCAmelCase_ = "std_conv" if "bit" in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" UpperCAmelCase_ = BitConfig( conv_layer=snake_case_ , num_labels=10_00 , idalabel=snake_case_ , labelaid=snake_case_ , ) return config def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' if "stem.conv" in name: UpperCAmelCase_ = name.replace("stem.conv" , "bit.embedder.convolution" ) if "blocks" in name: UpperCAmelCase_ = name.replace("blocks" , "layers" ) if "head.fc" in name: UpperCAmelCase_ = name.replace("head.fc" , "classifier.1" ) if name.startswith("norm" ): UpperCAmelCase_ = "bit." + name if "bit" not in name and "classifier" not in name: UpperCAmelCase_ = "bit.encoder." 
+ name return name def lowerCAmelCase_ ( ) -> Dict: '''simple docstring''' UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : int=False ) -> List[Any]: '''simple docstring''' UpperCAmelCase_ = get_config(snake_case_ ) # load original model from timm UpperCAmelCase_ = create_model(snake_case_ , pretrained=snake_case_ ) timm_model.eval() # load state_dict of original model UpperCAmelCase_ = timm_model.state_dict() for key in state_dict.copy().keys(): UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val.squeeze() if "head" in key else val # load HuggingFace model UpperCAmelCase_ = BitForImageClassification(snake_case_ ) model.eval() model.load_state_dict(snake_case_ ) # create image processor UpperCAmelCase_ = create_transform(**resolve_data_config({} , model=snake_case_ ) ) UpperCAmelCase_ = transform.transforms UpperCAmelCase_ = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } UpperCAmelCase_ = BitImageProcessor( do_resize=snake_case_ , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=snake_case_ , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=snake_case_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) UpperCAmelCase_ = prepare_img() UpperCAmelCase_ = transform(snake_case_ ).unsqueeze(0 ) UpperCAmelCase_ = processor(snake_case_ , return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(snake_case_ , snake_case_ ) # verify logits with torch.no_grad(): UpperCAmelCase_ = model(snake_case_ ) UpperCAmelCase_ = outputs.logits print("Logits:" , logits[0, :3] ) print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] ) UpperCAmelCase_ = timm_model(snake_case_ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(snake_case_ , outputs.logits , atol=1E-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" ) model.save_pretrained(snake_case_ ) processor.save_pretrained(snake_case_ ) if push_to_hub: print(f"""Pushing model {model_name} and processor to the hub""" ) model.push_to_hub(f"""ybelkada/{model_name}""" ) processor.push_to_hub(f"""ybelkada/{model_name}""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_: int =argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='resnetv2_50x1_bitm', type=str, help='Name of the BiT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model to the hub.', ) SCREAMING_SNAKE_CASE_: Union[str, Any] =parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
78
0
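# Hypothetical command line for the BiT conversion script above (the script's
# file name is illustrative; the flags match its argparse definition):
#   python convert_bit_to_pytorch.py \
#       --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./converted-resnetv2_50x1_bitm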
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: _a : List[Any] = None _a : Dict = logging.get_logger(__name__) _a : List[str] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} _a : Optional[Any] = { "vocab_file": { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model", }, "tokenizer_file": { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json", }, } _a : Union[str, Any] = { "camembert-base": 512, } _a : List[str] = "▁" class _lowercase ( __lowercase ): _SCREAMING_SNAKE_CASE : Dict = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE : str = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE : List[Any] = ["input_ids", "attention_mask"] _SCREAMING_SNAKE_CASE : Optional[int] = CamembertTokenizer def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE_ : List[str]="</s>" , SCREAMING_SNAKE_CASE_ : List[Any]="</s>" , SCREAMING_SNAKE_CASE_ : str="<s>" , SCREAMING_SNAKE_CASE_ : Any="<unk>" , SCREAMING_SNAKE_CASE_ : Dict="<pad>" , SCREAMING_SNAKE_CASE_ : List[Any]="<mask>" , SCREAMING_SNAKE_CASE_ : List[Any]=["<s>NOTUSED", "</s>NOTUSED"] , **SCREAMING_SNAKE_CASE_ : int , ) -> Dict: # Mask token behave like a normal word, i.e. include the space before it __snake_case = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token super().__init__( SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) __snake_case = vocab_file __snake_case = False if not self.vocab_file else True def a ( self : str , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __snake_case = [self.cls_token_id] __snake_case = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]: __snake_case = [self.sep_token_id] __snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' 
) if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __snake_case = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ ) return (out_vocab_file,)
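# Hedged usage sketch for the fast CamemBERT tokenizer defined above:
from transformers import CamembertTokenizerFast

tokenizer = CamembertTokenizerFast.from_pretrained("camembert-base")
encoding = tokenizer("Le camembert est délicieux !")
# A single sequence is wrapped as <s> ... </s>.
assert encoding.input_ids[0] == tokenizer.cls_token_id
assert encoding.input_ids[-1] == tokenizer.sep_token_id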
56
'''simple docstring''' import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class __A ( unittest.TestCase ): def _lowercase (self : List[str] ): UpperCAmelCase_ = 0 def _lowercase (self : Tuple ): UpperCAmelCase_ = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" ) self.assertIsInstance(__a , __a ) def _lowercase (self : str ): with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json" UpperCAmelCase_ = Path(__a ) / "config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , ) json.dump({"model_type": "clip"} , open(__a , "w" ) ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def _lowercase (self : Dict ): # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json" UpperCAmelCase_ = Path(__a ) / "config.json" json.dump( {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , ) json.dump({"model_type": "clip"} , open(__a , "w" ) ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def _lowercase (self : List[str] ): with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = CLIPConfig() # Create a dummy config file with image_proceesor_type UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json" UpperCAmelCase_ = Path(__a ) / "config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , ) json.dump({"model_type": "clip"} , open(__a , "w" ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ).to_dict() config_dict.pop("image_processor_type" ) UpperCAmelCase_ = CLIPImageProcessor(**__a ) # save in new folder model_config.save_pretrained(__a ) config.save_pretrained(__a ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ) # make sure private variable is not incorrectly saved UpperCAmelCase_ = json.loads(config.to_json_string() ) self.assertTrue("_processor_class" not in dict_as_saved ) self.assertIsInstance(__a , __a ) def _lowercase (self : int ): with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def _lowercase (self : Tuple ): with self.assertRaisesRegex( __a , "clip-base is not a local folder and is not a valid model identifier" ): UpperCAmelCase_ = AutoImageProcessor.from_pretrained("clip-base" ) def _lowercase (self : Optional[int] ): with self.assertRaisesRegex( __a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): UpperCAmelCase_ = 
AutoImageProcessor.from_pretrained(__a , revision="aaaaaa" ) def _lowercase (self : Union[str, Any] ): with self.assertRaisesRegex( __a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ): UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" ) def _lowercase (self : List[Any] ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__a ): UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" ) # If remote code is disabled, we can't load this config. with self.assertRaises(__a ): UpperCAmelCase_ = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) # Test image processor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__a ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a , trust_remote_code=__a ) self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" ) def _lowercase (self : Optional[int] ): try: AutoConfig.register("custom" , __a ) AutoImageProcessor.register(__a , __a ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__a ): AutoImageProcessor.register(__a , __a ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = Path(__a ) / "preprocessor_config.json" UpperCAmelCase_ = Path(__a ) / "config.json" json.dump( {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , ) json.dump({"model_type": "clip"} , open(__a , "w" ) ) UpperCAmelCase_ = CustomImageProcessor.from_pretrained(__a ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__a ) UpperCAmelCase_ = AutoImageProcessor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def _lowercase (self : Optional[int] ): class __A ( UpperCamelCase__ ): a__ : str = True try: AutoConfig.register("custom" , __a ) AutoImageProcessor.register(__a , __a ) # If remote code is not set, the default is to use local UpperCAmelCase_ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. 
UpperCAmelCase_ = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub UpperCAmelCase_ = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) self.assertTrue(not hasattr(__a , "is_local" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
78
0
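# A hedged sketch of the registration pattern the AutoImageProcessor tests
# above exercise (the class names here are illustrative stand-ins, not part
# of transformers):
from transformers import AutoConfig, AutoImageProcessor, PretrainedConfig
from transformers.image_processing_utils import BaseImageProcessor

class MyConfig(PretrainedConfig):
    model_type = "my-model"

class MyImageProcessor(BaseImageProcessor):
    pass

AutoConfig.register("my-model", MyConfig)
AutoImageProcessor.register(MyConfig, MyImageProcessor)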
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_fnet import FNetTokenizer else: A_ : List[str] = None A_ : List[str] = logging.get_logger(__name__) A_ : List[str] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} A_ : Tuple = { 'vocab_file': { 'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model', 'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model', }, 'tokenizer_file': { 'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json', 'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json', }, } A_ : Any = { 'google/fnet-base': 512, 'google/fnet-large': 512, } A_ : List[str] = '▁' class _lowerCAmelCase( UpperCAmelCase_ ): """simple docstring""" a : List[Any] =VOCAB_FILES_NAMES a : str =PRETRAINED_VOCAB_FILES_MAP a : Optional[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a : Tuple =['''input_ids''', '''token_type_ids'''] a : int =FNetTokenizer def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase="<unk>" , _lowerCamelCase="[SEP]" , _lowerCamelCase="<pad>" , _lowerCamelCase="[CLS]" , _lowerCamelCase="[MASK]" , **_lowerCamelCase , ): # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. UpperCamelCase_: int = ( AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase , normalized=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token ) super().__init__( _lowerCamelCase , tokenizer_file=_lowerCamelCase , do_lower_case=_lowerCamelCase , remove_space=_lowerCamelCase , keep_accents=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , pad_token=_lowerCamelCase , cls_token=_lowerCamelCase , mask_token=_lowerCamelCase , **_lowerCamelCase , ) UpperCamelCase_: str = do_lower_case UpperCamelCase_: Union[str, Any] = remove_space UpperCamelCase_: Tuple = keep_accents UpperCamelCase_: Any = vocab_file UpperCamelCase_: List[str] = False if not self.vocab_file else True def _a ( self , _lowerCamelCase , _lowerCamelCase = None ): UpperCamelCase_: int = [self.sep_token_id] UpperCamelCase_: Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _a ( self , _lowerCamelCase , _lowerCamelCase = None ): UpperCamelCase_: Optional[int] = [self.sep_token_id] UpperCamelCase_: Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _a ( self , _lowerCamelCase , _lowerCamelCase = None ): if not os.path.isdir(_lowerCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCamelCase_: List[Any] = os.path.join( _lowerCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ): copyfile(self.vocab_file , _lowerCamelCase ) return (out_vocab_file,)
57
'''simple docstring''' import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_: Tuple =False, False, False @dataclass class __A : a__ : Optional[int] = None a__ : bool = True a__ : bool = True a__ : Optional[str] = None # Automatically constructed a__ : ClassVar[str] = "dict" a__ : ClassVar[Any] = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} ) a__ : str = field(default="""Audio""" , init=UpperCamelCase__ , repr=UpperCamelCase__ ) def __call__(self : Optional[Any] ): return self.pa_type def _lowercase (self : str , __a : Union[str, bytes, dict] ): try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err if isinstance(__a , __a ): return {"bytes": None, "path": value} elif isinstance(__a , __a ): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes UpperCAmelCase_ = BytesIO() sf.write(__a , value["array"] , value["sampling_rate"] , format="wav" ) return {"bytes": buffer.getvalue(), "path": None} elif value.get("path" ) is not None and os.path.isfile(value["path"] ): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith("pcm" ): # "PCM" only has raw audio bytes if value.get("sampling_rate" ) is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" ) if value.get("bytes" ): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) UpperCAmelCase_ = np.frombuffer(value["bytes"] , dtype=np.intaa ).astype(np.floataa ) / 32767 else: UpperCAmelCase_ = np.memmap(value["path"] , dtype="h" , mode="r" ).astype(np.floataa ) / 32767 UpperCAmelCase_ = BytesIO(bytes() ) sf.write(__a , __a , value["sampling_rate"] , format="wav" ) return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get("path" )} elif value.get("bytes" ) is not None or value.get("path" ) is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get("bytes" ), "path": value.get("path" )} else: raise ValueError( f"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" ) def _lowercase (self : Dict , __a : dict , __a : Optional[Dict[str, Union[str, bool, None]]] = None ): if not self.decode: raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." ) UpperCAmelCase_ , UpperCAmelCase_ = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None) if path is None and file is None: raise ValueError(f"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" ) try: import librosa import soundfile as sf except ImportError as err: raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." 
) from err UpperCAmelCase_ = xsplitext(__a )[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, " "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, " "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " ) if file is None: UpperCAmelCase_ = token_per_repo_id or {} UpperCAmelCase_ = path.split("::" )[-1] try: UpperCAmelCase_ = string_to_dict(__a , config.HUB_DATASETS_URL )["repo_id"] UpperCAmelCase_ = token_per_repo_id[repo_id] except (ValueError, KeyError): UpperCAmelCase_ = None with xopen(__a , "rb" , use_auth_token=__a ) as f: UpperCAmelCase_ , UpperCAmelCase_ = sf.read(__a ) else: UpperCAmelCase_ , UpperCAmelCase_ = sf.read(__a ) UpperCAmelCase_ = array.T if self.mono: UpperCAmelCase_ = librosa.to_mono(__a ) if self.sampling_rate and self.sampling_rate != sampling_rate: UpperCAmelCase_ = librosa.resample(__a , orig_sr=__a , target_sr=self.sampling_rate ) UpperCAmelCase_ = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def _lowercase (self : Dict ): from .features import Value if self.decode: raise ValueError("Cannot flatten a decoded Audio feature." ) return { "bytes": Value("binary" ), "path": Value("string" ), } def _lowercase (self : Optional[Any] , __a : Union[pa.StringArray, pa.StructArray] ): if pa.types.is_string(storage.type ): UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.binary() ) UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.string() ) UpperCAmelCase_ = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ): UpperCAmelCase_ = pa.array([Audio().encode_example(__a ) if x is not None else None for x in storage.to_pylist()] ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("bytes" ) >= 0: UpperCAmelCase_ = storage.field("bytes" ) else: UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.binary() ) if storage.type.get_field_index("path" ) >= 0: UpperCAmelCase_ = storage.field("path" ) else: UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.string() ) UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() ) return array_cast(__a , self.pa_type ) def _lowercase (self : Dict , __a : pa.StructArray ): @no_op_if_value_is_null def path_to_bytes(__a : Tuple ): with xopen(__a , "rb" ) as f: UpperCAmelCase_ = f.read() return bytes_ UpperCAmelCase_ = pa.array( [ (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) UpperCAmelCase_ = pa.array( [os.path.basename(__a ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , ) UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() ) return array_cast(__a , self.pa_type )
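# Hedged usage sketch for the Audio feature above, via the public datasets
# API (the file path is illustrative):
from datasets import Audio, Dataset

ds = Dataset.from_dict({"audio": ["path/to/clip.wav"]})
ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
# Decoding happens lazily on access:
# ds[0]["audio"] -> {"path": ..., "array": np.ndarray, "sampling_rate": 16000}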
78
0
"""simple docstring""" from __future__ import annotations from typing import Dict from ...configuration_utils import PretrainedConfig __lowerCAmelCase : List[str] = { '''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''', '''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''', } class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = '''ernie_m''' _lowerCamelCase = {"dropout": "classifier_dropout", "num_classes": "num_labels"} def __init__( self , _lowercase = 2_5_0_0_0_2 , _lowercase = 7_6_8 , _lowercase = 1_2 , _lowercase = 1_2 , _lowercase = 3_0_7_2 , _lowercase = "gelu" , _lowercase = 0.1 , _lowercase = 0.1 , _lowercase = 5_1_4 , _lowercase = 0.02 , _lowercase = 1 , _lowercase = 1E-05 , _lowercase=None , _lowercase=False , _lowercase=0.0 , **_lowercase , ) -> Any: '''simple docstring''' super().__init__(pad_token_id=_lowercase , **_lowercase ) snake_case_ : Optional[int] = vocab_size snake_case_ : Dict = hidden_size snake_case_ : Optional[int] = num_hidden_layers snake_case_ : List[str] = num_attention_heads snake_case_ : str = intermediate_size snake_case_ : int = hidden_act snake_case_ : Any = hidden_dropout_prob snake_case_ : Any = attention_probs_dropout_prob snake_case_ : int = max_position_embeddings snake_case_ : Optional[Any] = initializer_range snake_case_ : Union[str, Any] = layer_norm_eps snake_case_ : Optional[int] = classifier_dropout snake_case_ : List[str] = is_decoder snake_case_ : Optional[int] = act_dropout
58
'''simple docstring''' import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Optional[int] ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg" UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ).convert("RGB" ) UpperCAmelCase_ = transforms.Compose( [ transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ), ] ) UpperCAmelCase_ = transform(snake_case_ ).unsqueeze(0 ).to(snake_case_ ) return image def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' if "visual_encoder" in key: UpperCAmelCase_ = re.sub("visual_encoder*" , "vision_model.encoder" , snake_case_ ) if "blocks" in key: UpperCAmelCase_ = re.sub(R"blocks" , "layers" , snake_case_ ) if "attn" in key: UpperCAmelCase_ = re.sub(R"attn" , "self_attn" , snake_case_ ) if "norm1" in key: UpperCAmelCase_ = re.sub(R"norm1" , "layer_norm1" , snake_case_ ) if "norm2" in key: UpperCAmelCase_ = re.sub(R"norm2" , "layer_norm2" , snake_case_ ) if "encoder.norm" in key: UpperCAmelCase_ = re.sub(R"encoder.norm" , "post_layernorm" , snake_case_ ) if "encoder.patch_embed.proj" in key: UpperCAmelCase_ = re.sub(R"encoder.patch_embed.proj" , "embeddings.patch_embedding" , snake_case_ ) if "encoder.pos_embed" in key: UpperCAmelCase_ = re.sub(R"encoder.pos_embed" , "embeddings.position_embedding" , snake_case_ ) if "encoder.cls_token" in key: UpperCAmelCase_ = re.sub(R"encoder.cls_token" , "embeddings.class_embedding" , snake_case_ ) if "self_attn" in key: UpperCAmelCase_ = re.sub(R"self_attn.proj" , "self_attn.projection" , snake_case_ ) return key @torch.no_grad() def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : Any=None ) -> Union[str, Any]: '''simple docstring''' if config_path is not None: UpperCAmelCase_ = BlipConfig.from_pretrained(snake_case_ ) else: UpperCAmelCase_ = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} ) UpperCAmelCase_ = BlipForConditionalGeneration(snake_case_ ).eval() UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth" UpperCAmelCase_ = blip_decoder(pretrained=snake_case_ , image_size=3_84 , vit="base" ) UpperCAmelCase_ = pt_model.eval() UpperCAmelCase_ = pt_model.state_dict() for key in modified_state_dict.copy(): UpperCAmelCase_ = modified_state_dict.pop(snake_case_ ) UpperCAmelCase_ = rename_key(snake_case_ ) UpperCAmelCase_ = value hf_model.load_state_dict(snake_case_ ) UpperCAmelCase_ = 3_84 UpperCAmelCase_ = load_demo_image(image_size=snake_case_ , device="cpu" ) UpperCAmelCase_ = BertTokenizer.from_pretrained("bert-base-uncased" ) UpperCAmelCase_ = tokenizer(["a picture of"] ).input_ids UpperCAmelCase_ = hf_model.generate(snake_case_ , snake_case_ ) assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 
20_14, 38_99, 1_02] UpperCAmelCase_ = hf_model.generate(snake_case_ ) assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02] if pytorch_dump_folder_path is not None: hf_model.save_pretrained(snake_case_ ) # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth' UpperCAmelCase_ = ( "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth" ) UpperCAmelCase_ = blip_vqa(pretrained=snake_case_ , image_size=snake_case_ , vit="base" ) vqa_model.eval() UpperCAmelCase_ = vqa_model.state_dict() for key in modified_state_dict.copy(): UpperCAmelCase_ = modified_state_dict.pop(snake_case_ ) UpperCAmelCase_ = rename_key(snake_case_ ) UpperCAmelCase_ = value UpperCAmelCase_ = BlipForQuestionAnswering(snake_case_ ) hf_vqa_model.load_state_dict(snake_case_ ) UpperCAmelCase_ = ["How many dogs are in this image?"] UpperCAmelCase_ = tokenizer(snake_case_ , return_tensors="pt" ).input_ids UpperCAmelCase_ = hf_vqa_model.generate(snake_case_ , snake_case_ ) print(tokenizer.decode(answer[0] ) ) assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]" if pytorch_dump_folder_path is not None: hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa" ) UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth" UpperCAmelCase_ = blip_itm(pretrained=snake_case_ , image_size=snake_case_ , vit="base" ) itm_model.eval() UpperCAmelCase_ = itm_model.state_dict() for key in modified_state_dict.copy(): UpperCAmelCase_ = modified_state_dict.pop(snake_case_ ) UpperCAmelCase_ = rename_key(snake_case_ ) UpperCAmelCase_ = value UpperCAmelCase_ = BlipForImageTextRetrieval(snake_case_ ) UpperCAmelCase_ = ["A picture of a woman with a dog sitting in a beach"] UpperCAmelCase_ = tokenizer( snake_case_ , return_tensors="pt" , padding="max_length" , truncation=snake_case_ , max_length=35 , ).input_ids hf_itm_model.load_state_dict(snake_case_ ) hf_itm_model.eval() UpperCAmelCase_ = hf_itm_model(snake_case_ , snake_case_ , use_itm_head=snake_case_ ) UpperCAmelCase_ = hf_itm_model(snake_case_ , snake_case_ , use_itm_head=snake_case_ ) assert out[0].item() == 0.2110_6874_9427_7954 assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127 if pytorch_dump_folder_path is not None: hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_: Optional[Any] =argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') SCREAMING_SNAKE_CASE_: int =parser.parse_args() convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
78
0
from __future__ import annotations

import typing
from collections import Counter


def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for every perimeter up to max_perimeter, the right triangles
    with integer sides that have that perimeter."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    """Return the perimeter <= max_perimeter with the most solutions."""
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(f"Perimeter {solution()} has maximum solutions")
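# Worked example for the functions above: the right triangles with perimeter
# 120 are (30, 40, 50), (20, 48, 52) and (24, 45, 51), so:
assert pythagorean_triple(120)[120] == 3
assert solution(120) == 120
# For the default limit of 1000 the script prints 840 (Project Euler problem 39).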
59
'''simple docstring''' import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Union[str, Any]=0.999 , snake_case_ : Tuple="cosine" , ) -> Optional[Any]: '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(snake_case_ : Optional[int] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(snake_case_ : Optional[Any] ): return math.exp(t * -12.0 ) else: raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) UpperCAmelCase_ = [] for i in range(snake_case_ ): UpperCAmelCase_ = i / num_diffusion_timesteps UpperCAmelCase_ = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(snake_case_ ) / alpha_bar_fn(snake_case_ ) , snake_case_ ) ) return torch.tensor(snake_case_ , dtype=torch.floataa ) class __A ( UpperCamelCase__ , UpperCamelCase__ ): a__ : Tuple = [e.name for e in KarrasDiffusionSchedulers] a__ : Optional[Any] = 2 @register_to_config def __init__(self : Union[str, Any] , __a : int = 1000 , __a : float = 0.0_00_85 , __a : float = 0.0_12 , __a : str = "linear" , __a : Optional[Union[np.ndarray, List[float]]] = None , __a : str = "epsilon" , __a : Optional[bool] = False , __a : Optional[bool] = False , __a : float = 1.0 , __a : str = "linspace" , __a : int = 0 , ): if trained_betas is not None: UpperCAmelCase_ = torch.tensor(__a , dtype=torch.floataa ) elif beta_schedule == "linear": UpperCAmelCase_ = torch.linspace(__a , __a , __a , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. UpperCAmelCase_ = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , __a , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule UpperCAmelCase_ = betas_for_alpha_bar(__a , alpha_transform_type="cosine" ) elif beta_schedule == "exp": UpperCAmelCase_ = betas_for_alpha_bar(__a , alpha_transform_type="exp" ) else: raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" ) UpperCAmelCase_ = 1.0 - self.betas UpperCAmelCase_ = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(__a , __a , __a ) UpperCAmelCase_ = use_karras_sigmas def _lowercase (self : Optional[Any] , __a : Union[str, Any] , __a : Tuple=None ): if schedule_timesteps is None: UpperCAmelCase_ = self.timesteps UpperCAmelCase_ = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: UpperCAmelCase_ = 1 if len(__a ) > 1 else 0 else: UpperCAmelCase_ = timestep.cpu().item() if torch.is_tensor(__a ) else timestep UpperCAmelCase_ = self._index_counter[timestep_int] return indices[pos].item() @property def _lowercase (self : List[Any] ): # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def _lowercase (self : Optional[Any] , __a : torch.FloatTensor , __a : Union[float, torch.FloatTensor] , ): UpperCAmelCase_ = self.index_for_timestep(__a ) UpperCAmelCase_ = self.sigmas[step_index] UpperCAmelCase_ = sample / ((sigma**2 + 1) ** 0.5) return sample def _lowercase (self : Any , __a : int , __a : Union[str, torch.device] = None , __a : Optional[int] = None , ): UpperCAmelCase_ = num_inference_steps UpperCAmelCase_ = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": UpperCAmelCase_ = np.linspace(0 , num_train_timesteps - 1 , __a , dtype=__a )[::-1].copy() elif self.config.timestep_spacing == "leading": UpperCAmelCase_ = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 UpperCAmelCase_ = (np.arange(0 , __a ) * step_ratio).round()[::-1].copy().astype(__a ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": UpperCAmelCase_ = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 UpperCAmelCase_ = (np.arange(__a , 0 , -step_ratio )).round().copy().astype(__a ) timesteps -= 1 else: raise ValueError( f"""{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" ) UpperCAmelCase_ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) UpperCAmelCase_ = np.log(__a ) UpperCAmelCase_ = np.interp(__a , np.arange(0 , len(__a ) ) , __a ) if self.config.use_karras_sigmas: UpperCAmelCase_ = self._convert_to_karras(in_sigmas=__a , num_inference_steps=self.num_inference_steps ) UpperCAmelCase_ = np.array([self._sigma_to_t(__a , __a ) for sigma in sigmas] ) UpperCAmelCase_ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) UpperCAmelCase_ = torch.from_numpy(__a ).to(device=__a ) UpperCAmelCase_ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] ) UpperCAmelCase_ = torch.from_numpy(__a ) UpperCAmelCase_ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] ) if str(__a ).startswith("mps" ): # mps does not support float64 UpperCAmelCase_ = timesteps.to(__a , dtype=torch.floataa ) else: UpperCAmelCase_ = timesteps.to(device=__a ) # empty dt and derivative UpperCAmelCase_ = None UpperCAmelCase_ = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter UpperCAmelCase_ = defaultdict(__a ) def _lowercase (self : int , __a : Optional[Any] , __a : List[str] ): # get log sigma UpperCAmelCase_ = np.log(__a ) # get distribution UpperCAmelCase_ = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range UpperCAmelCase_ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 ) UpperCAmelCase_ = low_idx + 1 UpperCAmelCase_ = log_sigmas[low_idx] UpperCAmelCase_ = log_sigmas[high_idx] # interpolate sigmas UpperCAmelCase_ = (low - log_sigma) / (low - high) UpperCAmelCase_ = np.clip(__a , 0 , 1 ) # transform interpolation to time range UpperCAmelCase_ = (1 - w) * low_idx + w * high_idx UpperCAmelCase_ = t.reshape(sigma.shape ) return t def _lowercase (self : Dict , __a : torch.FloatTensor , __a : Optional[int] ): UpperCAmelCase_ = in_sigmas[-1].item() UpperCAmelCase_ = in_sigmas[0].item() UpperCAmelCase_ = 7.0 # 7.0 is the value used in the paper UpperCAmelCase_ = np.linspace(0 , 1 , __a ) UpperCAmelCase_ = sigma_min ** (1 / rho) UpperCAmelCase_ = sigma_max ** (1 / rho) UpperCAmelCase_ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def _lowercase (self : List[str] ): return self.dt is None def _lowercase (self : List[Any] , __a : Union[torch.FloatTensor, np.ndarray] , __a : Union[float, torch.FloatTensor] , __a : Union[torch.FloatTensor, np.ndarray] , __a : bool = True , ): UpperCAmelCase_ = self.index_for_timestep(__a ) # advance index counter by 1 UpperCAmelCase_ = timestep.cpu().item() if torch.is_tensor(__a ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: UpperCAmelCase_ = self.sigmas[step_index] UpperCAmelCase_ = self.sigmas[step_index + 1] else: # 2nd order / Heun's method UpperCAmelCase_ = self.sigmas[step_index - 1] UpperCAmelCase_ = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API UpperCAmelCase_ = 0 UpperCAmelCase_ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": UpperCAmelCase_ = sigma_hat if self.state_in_first_order else sigma_next UpperCAmelCase_ = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": UpperCAmelCase_ = sigma_hat if self.state_in_first_order else sigma_next UpperCAmelCase_ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": UpperCAmelCase_ = model_output else: raise ValueError( f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" ) if self.config.clip_sample: UpperCAmelCase_ = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order UpperCAmelCase_ = (sample - pred_original_sample) / sigma_hat # 3. delta timestep UpperCAmelCase_ = sigma_next - sigma_hat # store for 2nd order step UpperCAmelCase_ = derivative UpperCAmelCase_ = dt UpperCAmelCase_ = sample else: # 2. 2nd order / Heun's method UpperCAmelCase_ = (sample - pred_original_sample) / sigma_next UpperCAmelCase_ = (self.prev_derivative + derivative) / 2 # 3. take prev timestep & sample UpperCAmelCase_ = self.dt UpperCAmelCase_ = self.sample # free dt and derivative # Note, this puts the scheduler in "first order mode" UpperCAmelCase_ = None UpperCAmelCase_ = None UpperCAmelCase_ = None UpperCAmelCase_ = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=__a ) def _lowercase (self : Any , __a : torch.FloatTensor , __a : torch.FloatTensor , __a : torch.FloatTensor , ): # Make sure sigmas and timesteps have the same device and dtype as original_samples UpperCAmelCase_ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(__a ): # mps does not support float64 UpperCAmelCase_ = self.timesteps.to(original_samples.device , dtype=torch.floataa ) UpperCAmelCase_ = timesteps.to(original_samples.device , dtype=torch.floataa ) else: UpperCAmelCase_ = self.timesteps.to(original_samples.device ) UpperCAmelCase_ = timesteps.to(original_samples.device ) UpperCAmelCase_ = [self.index_for_timestep(__a , __a ) for t in timesteps] UpperCAmelCase_ = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): UpperCAmelCase_ = sigma.unsqueeze(-1 ) UpperCAmelCase_ = original_samples + noise * sigma return noisy_samples def __len__(self : str ): return self.config.num_train_timesteps
78
0
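# A standalone sketch of the cosine noise schedule that betas_for_alpha_bar
# in the scheduler module above computes (max_beta=0.999 assumed, matching
# its default clipping value):
import math

def cosine_alpha_bar(t: float) -> float:
    return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

num_steps = 4
betas = [
    min(1 - cosine_alpha_bar((i + 1) / num_steps) / cosine_alpha_bar(i / num_steps), 0.999)
    for i in range(num_steps)
]
print(betas)  # increasing values in (0, 1), ending at the 0.999 clip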