code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
from __future__ import annotations import math def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" if num <= 0: lowercase__ : Union[str, Any] = F"""{num}: Invalid input, please enter a positive integer.""" raise ValueError(lowerCamelCase__ ) lowercase__ : str = [True] * (num + 1) lowercase__ : int = [] lowercase__ : Dict = 2 lowercase__ : Any = int(math.sqrt(lowerCamelCase__ ) ) while start <= end: # If start is a prime if sieve[start] is True: prime.append(lowerCamelCase__ ) # Set multiples of start be False for i in range(start * start , num + 1 , lowerCamelCase__ ): if sieve[i] is True: lowercase__ : str = False start += 1 for j in range(end + 1 , num + 1 ): if sieve[j] is True: prime.append(lowerCamelCase__ ) return prime if __name__ == "__main__": print(prime_sieve(int(input('''Enter a positive integer: ''').strip())))
81
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class snake_case__(unittest.TestCase ): """simple docstring""" def snake_case ( self : Optional[Any] ): lowercase__ : Dict = tempfile.mkdtemp() # fmt: off lowercase__ : Any = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on lowercase__ : Dict = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) ) lowercase__ : Tuple = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] lowercase__ : Tuple = {"unk_token": "<unk>"} lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(SCREAMING_SNAKE_CASE ) ) lowercase__ : Tuple = { "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073], "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711], } lowercase__ : Optional[Any] = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Union[str, Any] ): return 
CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ) def snake_case ( self : Optional[int] , **SCREAMING_SNAKE_CASE : Union[str, Any] ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ) def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Dict ): return CLIPImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ) def snake_case ( self : Union[str, Any] ): shutil.rmtree(self.tmpdirname ) def snake_case ( self : Any ): lowercase__ : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowercase__ : str = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs] return image_inputs def snake_case ( self : int ): lowercase__ : Optional[int] = self.get_tokenizer() lowercase__ : List[Any] = self.get_rust_tokenizer() lowercase__ : List[str] = self.get_image_processor() lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) processor_slow.save_pretrained(self.tmpdirname ) lowercase__ : Dict = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE ) lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) processor_fast.save_pretrained(self.tmpdirname ) lowercase__ : Tuple = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE ) self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) 
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE ) self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE ) def snake_case ( self : List[str] ): lowercase__ : Any = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowercase__ : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) lowercase__ : int = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 ) lowercase__ : Union[str, Any] = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE ) def snake_case ( self : str ): lowercase__ : int = self.get_image_processor() lowercase__ : Optional[Any] = self.get_tokenizer() lowercase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) lowercase__ : Any = self.prepare_image_inputs() lowercase__ : List[Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="np" ) lowercase__ : Optional[int] = processor(images=SCREAMING_SNAKE_CASE , return_tensors="np" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def snake_case ( self : str ): lowercase__ : Tuple = self.get_image_processor() lowercase__ : Any = self.get_tokenizer() lowercase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) lowercase__ : int = "lower newer" lowercase__ : Dict = processor(text=SCREAMING_SNAKE_CASE ) lowercase__ : int = 
tokenizer(SCREAMING_SNAKE_CASE ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def snake_case ( self : Union[str, Any] ): lowercase__ : Optional[int] = self.get_image_processor() lowercase__ : Tuple = self.get_tokenizer() lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) lowercase__ : List[Any] = "lower newer" lowercase__ : str = self.prepare_image_inputs() lowercase__ : int = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE ): processor() def snake_case ( self : Optional[Any] ): lowercase__ : Dict = self.get_image_processor() lowercase__ : Optional[Any] = self.get_tokenizer() lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) lowercase__ : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase__ : Any = processor.batch_decode(SCREAMING_SNAKE_CASE ) lowercase__ : Any = tokenizer.batch_decode(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def snake_case ( self : str ): lowercase__ : List[str] = self.get_image_processor() lowercase__ : List[str] = self.get_tokenizer() lowercase__ : Union[str, Any] = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) lowercase__ : Any = "lower newer" lowercase__ : Union[str, Any] = self.prepare_image_inputs() lowercase__ : int = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
81
1
import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class snake_case__(_UpperCamelCase , unittest.TestCase ): """simple docstring""" lowercase_ = GPTaTokenizer lowercase_ = GPTaTokenizerFast lowercase_ = True lowercase_ = {"""add_prefix_space""": True} lowercase_ = False def snake_case ( self : Any ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase__ : Union[str, Any] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>", ] lowercase__ : Optional[Any] = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) ) lowercase__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] lowercase__ : List[str] = {"unk_token": "<unk>"} lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) lowercase__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(SCREAMING_SNAKE_CASE ) ) def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : int ): kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ) def snake_case ( self : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ): kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ) def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Dict ): lowercase__ : List[str] 
= "lower newer" lowercase__ : Optional[Any] = "lower newer" return input_text, output_text def snake_case ( self : Any ): lowercase__ : Dict = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowercase__ : Dict = "lower newer" lowercase__ : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"] lowercase__ : Optional[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowercase__ : Any = tokens + [tokenizer.unk_token] lowercase__ : str = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def snake_case ( self : Optional[Any] ): if not self.test_rust_tokenizer: return lowercase__ : Dict = self.get_tokenizer() lowercase__ : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE ) lowercase__ : int = "lower newer" # Testing tokenization lowercase__ : str = tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE ) lowercase__ : int = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Testing conversion to ids without special tokens lowercase__ : Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE ) lowercase__ : Dict = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Testing conversion to ids with special tokens lowercase__ : List[str] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE ) lowercase__ : List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # 
Testing the unknown token lowercase__ : List[Any] = tokens + [rust_tokenizer.unk_token] lowercase__ : Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def snake_case ( self : str , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[Any] ): # It's very difficult to mix/test pretokenization with byte-level # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : int=15 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowercase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # Simple input lowercase__ : Dict = "This is a simple input" lowercase__ : List[str] = ["This is a simple input 1", "This is a simple input 2"] lowercase__ : Union[str, Any] = ("This is a simple input", "This is a pair") lowercase__ : Optional[int] = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" ) # Simple input self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" ) # Simple input self.assertRaises( SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , ) # Pair input self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" ) # Pair input self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , 
SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" ) # Pair input self.assertRaises( SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , ) def snake_case ( self : Any ): lowercase__ : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" ) # Simple input lowercase__ : Optional[int] = "This is a simple input" lowercase__ : List[str] = ["This is a simple input looooooooong", "This is a simple input"] lowercase__ : List[Any] = ("This is a simple input", "This is a pair") lowercase__ : Optional[Any] = [ ("This is a simple input loooooong", "This is a simple input"), ("This is a simple pair loooooong", "This is a simple pair"), ] lowercase__ : Any = tokenizer.pad_token_id lowercase__ : Dict = tokenizer(SCREAMING_SNAKE_CASE , padding="max_length" , max_length=30 , return_tensors="np" ) lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="np" ) lowercase__ : List[str] = tokenizer(*SCREAMING_SNAKE_CASE , padding="max_length" , max_length=60 , return_tensors="np" ) lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="np" ) # s # test single string max_length padding self.assertEqual(out_s["input_ids"].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s["input_ids"] ) self.assertTrue(0 in out_s["attention_mask"] ) # s2 # test automatic padding self.assertEqual(out_sa["input_ids"].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["input_ids"][0] ) self.assertFalse(0 in out_sa["attention_mask"][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["input_ids"][1] ) self.assertTrue(0 in out_sa["attention_mask"][1] ) # p # test single pair max_length padding self.assertEqual(out_p["input_ids"].shape[-1] , 60 ) 
self.assertTrue(pad_token_id in out_p["input_ids"] ) self.assertTrue(0 in out_p["attention_mask"] ) # p2 # test automatic padding pair self.assertEqual(out_pa["input_ids"].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["input_ids"][0] ) self.assertFalse(0 in out_pa["attention_mask"][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["input_ids"][1] ) self.assertTrue(0 in out_pa["attention_mask"][1] ) def snake_case ( self : str ): lowercase__ : List[str] = "$$$" lowercase__ : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = "This is a simple input" lowercase__ : Dict = ["This is a simple input 1", "This is a simple input 2"] lowercase__ : Optional[int] = tokenizer.bos_token_id lowercase__ : List[Any] = tokenizer(SCREAMING_SNAKE_CASE ) lowercase__ : int = tokenizer(SCREAMING_SNAKE_CASE ) self.assertEqual(out_s.input_ids[0] , SCREAMING_SNAKE_CASE ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) lowercase__ : List[Any] = tokenizer.decode(out_s.input_ids ) lowercase__ : List[str] = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , SCREAMING_SNAKE_CASE ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def snake_case ( self : Optional[int] ): pass def snake_case ( self : Tuple ): # TODO: change to self.get_tokenizers() when the fast version is implemented lowercase__ : int = [self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE )] for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): lowercase__ : str = "Encode this." lowercase__ : List[Any] = "This one too please." 
lowercase__ : Dict = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE ) encoded_sequence += tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE ) lowercase__ : Dict = tokenizer.encode_plus( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_special_tokens_mask=SCREAMING_SNAKE_CASE , ) lowercase__ : Tuple = encoded_sequence_dict["input_ids"] lowercase__ : int = encoded_sequence_dict["special_tokens_mask"] self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) ) lowercase__ : List[str] = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(SCREAMING_SNAKE_CASE ) ] lowercase__ : Any = [x for x in filtered_sequence if x is not None] self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @require_tokenizers class snake_case__(unittest.TestCase ): """simple docstring""" def snake_case ( self : Union[str, Any] ): # More context: # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1 # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519 # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439 lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE ) lowercase__ : Optional[Any] = "A photo of a cat" lowercase__ : Tuple = tokenizer.encode( SCREAMING_SNAKE_CASE , ) self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] ) tokenizer.save_pretrained("test_opt" ) lowercase__ : int = AutoTokenizer.from_pretrained("./test_opt" ) lowercase__ : Dict = tokenizer.encode( SCREAMING_SNAKE_CASE , ) self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] ) def snake_case ( self : Union[str, Any] ): lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=SCREAMING_SNAKE_CASE ) lowercase__ : int = "A photo of a cat" lowercase__ : Tuple = tokenizer.encode( SCREAMING_SNAKE_CASE , ) # Same as above 
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] ) @unittest.skip("This test is failing because of a bug in the fast tokenizer" ) def snake_case ( self : Tuple ): lowercase__ : str = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE ) lowercase__ : Optional[Any] = "bos" lowercase__ : List[Any] = tokenizer.get_vocab()["bos"] lowercase__ : Optional[Any] = "A photo of a cat" lowercase__ : Union[str, Any] = tokenizer.encode( SCREAMING_SNAKE_CASE , ) # We changed the bos token self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] ) tokenizer.save_pretrained("./tok" ) lowercase__ : Any = AutoTokenizer.from_pretrained("./tok" ) self.assertTrue(tokenizer.is_fast ) lowercase__ : Tuple = tokenizer.encode( SCREAMING_SNAKE_CASE , ) self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
81
import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class snake_case__(unittest.TestCase ): """simple docstring""" def snake_case ( self : int ): lowercase__ : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) lowercase__ : Dict = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE ) lowercase__ : str = -1 lowercase__ : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE ) lowercase__ : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE ) lowercase__ : Dict = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: lowercase__ : str = TextStreamer(SCREAMING_SNAKE_CASE ) model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer lowercase__ : int = cs.out[:-1] self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def snake_case ( self : Optional[int] ): lowercase__ : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) lowercase__ : str = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE ) lowercase__ : Optional[Any] = -1 lowercase__ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE ) lowercase__ : int = tokenizer.decode(greedy_ids[0] ) lowercase__ : Union[str, 
Any] = TextIteratorStreamer(SCREAMING_SNAKE_CASE ) lowercase__ : Dict = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} lowercase__ : Optional[int] = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE ) thread.start() lowercase__ : List[Any] = "" for new_text in streamer: streamer_text += new_text self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def snake_case ( self : Union[str, Any] ): lowercase__ : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) lowercase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE ) lowercase__ : Union[str, Any] = -1 lowercase__ : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE ) lowercase__ : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE ) lowercase__ : Any = greedy_ids[:, input_ids.shape[1] :] lowercase__ : Any = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: lowercase__ : str = TextStreamer(SCREAMING_SNAKE_CASE , skip_prompt=SCREAMING_SNAKE_CASE ) model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer lowercase__ : Optional[Any] = cs.out[:-1] self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def snake_case ( self : Any ): # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them lowercase__ : List[str] = AutoTokenizer.from_pretrained("distilgpt2" ) lowercase__ : Tuple = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(SCREAMING_SNAKE_CASE ) lowercase__ : List[Any] = -1 lowercase__ : List[Any] = torch.ones((1, 5) , device=SCREAMING_SNAKE_CASE ).long() * model.config.bos_token_id with CaptureStdout() as cs: lowercase__ : Dict = TextStreamer(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE ) model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=1 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token lowercase__ : List[Any] = cs.out[:-1] # Remove the final "\n" lowercase__ : Optional[int] = tokenizer(SCREAMING_SNAKE_CASE , return_tensors="pt" ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def snake_case ( self : Optional[int] ): lowercase__ : Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) lowercase__ : List[str] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE ) lowercase__ : int = -1 lowercase__ : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE ) lowercase__ : List[Any] = TextIteratorStreamer(SCREAMING_SNAKE_CASE , timeout=0.001 ) lowercase__ : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} lowercase__ : Any = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(SCREAMING_SNAKE_CASE ): lowercase__ : List[str] = "" for new_text in streamer: streamer_text += new_text
81
1
def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" assert isinstance(lowerCamelCase__ , lowerCamelCase__ ), F"""The input value of [n={number}] is not an integer""" if number == 1: return 2 elif number < 1: lowercase__ : int = F"""The input value of [n={number}] has to be > 0""" raise ValueError(lowerCamelCase__ ) else: lowercase__ : List[str] = sylvester(number - 1 ) lowercase__ : Optional[Any] = num - 1 lowercase__ : Optional[Any] = num return lower * upper + 1 if __name__ == "__main__": print(f'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
81
from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class snake_case__(_UpperCamelCase ): """simple docstring""" lowercase_ = 42 class snake_case__(nn.Module ): """simple docstring""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : Optional[int]=3 , SCREAMING_SNAKE_CASE : List[Any]=("DownEncoderBlock2D",) , SCREAMING_SNAKE_CASE : Dict=(64,) , SCREAMING_SNAKE_CASE : Optional[Any]=2 , SCREAMING_SNAKE_CASE : Optional[int]=32 , SCREAMING_SNAKE_CASE : List[str]="silu" , SCREAMING_SNAKE_CASE : str=True , ): super().__init__() lowercase__ : str = layers_per_block lowercase__ : int = torch.nn.Convad( SCREAMING_SNAKE_CASE , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , ) lowercase__ : Union[str, Any] = None lowercase__ : Optional[int] = nn.ModuleList([] ) # down lowercase__ : Dict = block_out_channels[0] for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE ): lowercase__ : List[str] = output_channel lowercase__ : Dict = block_out_channels[i] lowercase__ : List[str] = i == len(SCREAMING_SNAKE_CASE ) - 1 lowercase__ : Union[str, Any] = get_down_block( SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block , in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=SCREAMING_SNAKE_CASE , resnet_groups=SCREAMING_SNAKE_CASE , attention_head_dim=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , ) self.down_blocks.append(SCREAMING_SNAKE_CASE ) # mid lowercase__ : Optional[int] = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="default" , 
attention_head_dim=block_out_channels[-1] , resnet_groups=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , ) # out lowercase__ : int = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=SCREAMING_SNAKE_CASE , eps=1E-6 ) lowercase__ : Union[str, Any] = nn.SiLU() lowercase__ : Tuple = 2 * out_channels if double_z else out_channels lowercase__ : Tuple = nn.Convad(block_out_channels[-1] , SCREAMING_SNAKE_CASE , 3 , padding=1 ) lowercase__ : Tuple = False def snake_case ( self : int , SCREAMING_SNAKE_CASE : Tuple ): lowercase__ : List[str] = x lowercase__ : Tuple = self.conv_in(SCREAMING_SNAKE_CASE ) if self.training and self.gradient_checkpointing: def create_custom_forward(SCREAMING_SNAKE_CASE : Union[str, Any] ): def custom_forward(*SCREAMING_SNAKE_CASE : Dict ): return module(*SCREAMING_SNAKE_CASE ) return custom_forward # down if is_torch_version(">=" , "1.11.0" ): for down_block in self.down_blocks: lowercase__ : Union[str, Any] = torch.utils.checkpoint.checkpoint( create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE ) # middle lowercase__ : int = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE ) else: for down_block in self.down_blocks: lowercase__ : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) # middle lowercase__ : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE ) else: # down for down_block in self.down_blocks: lowercase__ : Any = down_block(SCREAMING_SNAKE_CASE ) # middle lowercase__ : List[str] = self.mid_block(SCREAMING_SNAKE_CASE ) # post-process lowercase__ : Union[str, Any] = self.conv_norm_out(SCREAMING_SNAKE_CASE ) lowercase__ : List[Any] = self.conv_act(SCREAMING_SNAKE_CASE ) lowercase__ : Any = self.conv_out(SCREAMING_SNAKE_CASE ) return sample class snake_case__(nn.Module ): 
"""simple docstring""" def __init__( self : Dict , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : Optional[int]=("UpDecoderBlock2D",) , SCREAMING_SNAKE_CASE : int=(64,) , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : int=32 , SCREAMING_SNAKE_CASE : str="silu" , SCREAMING_SNAKE_CASE : Any="group" , ): super().__init__() lowercase__ : List[str] = layers_per_block lowercase__ : int = nn.Convad( SCREAMING_SNAKE_CASE , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , ) lowercase__ : Optional[Any] = None lowercase__ : Dict = nn.ModuleList([] ) lowercase__ : List[str] = in_channels if norm_type == "spatial" else None # mid lowercase__ : str = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , ) # up lowercase__ : Tuple = list(reversed(SCREAMING_SNAKE_CASE ) ) lowercase__ : Dict = reversed_block_out_channels[0] for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE ): lowercase__ : Tuple = output_channel lowercase__ : List[Any] = reversed_block_out_channels[i] lowercase__ : List[Any] = i == len(SCREAMING_SNAKE_CASE ) - 1 lowercase__ : Dict = get_up_block( SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , prev_output_channel=SCREAMING_SNAKE_CASE , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , resnet_groups=SCREAMING_SNAKE_CASE , attention_head_dim=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , resnet_time_scale_shift=SCREAMING_SNAKE_CASE , ) self.up_blocks.append(SCREAMING_SNAKE_CASE ) lowercase__ : Optional[Any] = output_channel # out if norm_type == "spatial": lowercase__ : Any = 
SpatialNorm(block_out_channels[0] , SCREAMING_SNAKE_CASE ) else: lowercase__ : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=SCREAMING_SNAKE_CASE , eps=1E-6 ) lowercase__ : Union[str, Any] = nn.SiLU() lowercase__ : Any = nn.Convad(block_out_channels[0] , SCREAMING_SNAKE_CASE , 3 , padding=1 ) lowercase__ : List[Any] = False def snake_case ( self : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str=None ): lowercase__ : Tuple = z lowercase__ : List[str] = self.conv_in(SCREAMING_SNAKE_CASE ) lowercase__ : List[Any] = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(SCREAMING_SNAKE_CASE : List[str] ): def custom_forward(*SCREAMING_SNAKE_CASE : Optional[int] ): return module(*SCREAMING_SNAKE_CASE ) return custom_forward if is_torch_version(">=" , "1.11.0" ): # middle lowercase__ : List[str] = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE ) lowercase__ : str = sample.to(SCREAMING_SNAKE_CASE ) # up for up_block in self.up_blocks: lowercase__ : List[Any] = torch.utils.checkpoint.checkpoint( create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE ) else: # middle lowercase__ : str = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowercase__ : Tuple = sample.to(SCREAMING_SNAKE_CASE ) # up for up_block in self.up_blocks: lowercase__ : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else: # middle lowercase__ : Optional[int] = self.mid_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowercase__ : Optional[Any] = sample.to(SCREAMING_SNAKE_CASE ) # up for up_block in self.up_blocks: lowercase__ : Optional[Any] = 
up_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # post-process if latent_embeds is None: lowercase__ : Union[str, Any] = self.conv_norm_out(SCREAMING_SNAKE_CASE ) else: lowercase__ : Dict = self.conv_norm_out(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowercase__ : Union[str, Any] = self.conv_act(SCREAMING_SNAKE_CASE ) lowercase__ : Tuple = self.conv_out(SCREAMING_SNAKE_CASE ) return sample class snake_case__(nn.Module ): """simple docstring""" def __init__( self : Any , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : List[Any]="random" , SCREAMING_SNAKE_CASE : Union[str, Any]=False , SCREAMING_SNAKE_CASE : int=True ): super().__init__() lowercase__ : List[Any] = n_e lowercase__ : List[str] = vq_embed_dim lowercase__ : Optional[Any] = beta lowercase__ : List[str] = legacy lowercase__ : Tuple = nn.Embedding(self.n_e , self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e ) lowercase__ : Union[str, Any] = remap if self.remap is not None: self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) ) lowercase__ : Tuple = self.used.shape[0] lowercase__ : Any = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": lowercase__ : Any = self.re_embed lowercase__ : Tuple = self.re_embed + 1 print( f"""Remapping {self.n_e} indices to {self.re_embed} indices. 
""" f"""Using {self.unknown_index} for unknown indices.""" ) else: lowercase__ : str = n_e lowercase__ : Union[str, Any] = sane_index_shape def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict ): lowercase__ : Any = inds.shape assert len(SCREAMING_SNAKE_CASE ) > 1 lowercase__ : List[str] = inds.reshape(ishape[0] , -1 ) lowercase__ : str = self.used.to(SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = (inds[:, :, None] == used[None, None, ...]).long() lowercase__ : Dict = match.argmax(-1 ) lowercase__ : Dict = match.sum(2 ) < 1 if self.unknown_index == "random": lowercase__ : Optional[Any] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device ) else: lowercase__ : List[Any] = self.unknown_index return new.reshape(SCREAMING_SNAKE_CASE ) def snake_case ( self : int , SCREAMING_SNAKE_CASE : int ): lowercase__ : List[Any] = inds.shape assert len(SCREAMING_SNAKE_CASE ) > 1 lowercase__ : Optional[int] = inds.reshape(ishape[0] , -1 ) lowercase__ : str = self.used.to(SCREAMING_SNAKE_CASE ) if self.re_embed > self.used.shape[0]: # extra token lowercase__ : int = 0 # simply set to zero lowercase__ : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , SCREAMING_SNAKE_CASE ) return back.reshape(SCREAMING_SNAKE_CASE ) def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : List[Any] ): # reshape z -> (batch, height, width, channel) and flatten lowercase__ : Union[str, Any] = z.permute(0 , 2 , 3 , 1 ).contiguous() lowercase__ : Optional[Any] = z.view(-1 , self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z lowercase__ : Optional[Any] = torch.argmin(torch.cdist(SCREAMING_SNAKE_CASE , self.embedding.weight ) , dim=1 ) lowercase__ : List[str] = self.embedding(SCREAMING_SNAKE_CASE ).view(z.shape ) lowercase__ : Dict = None lowercase__ : int = None # compute loss for embedding if not self.legacy: lowercase__ : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + 
torch.mean((z_q - z.detach()) ** 2 ) else: lowercase__ : List[str] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients lowercase__ : Union[str, Any] = z + (z_q - z).detach() # reshape back to match original input shape lowercase__ : Optional[int] = z_q.permute(0 , 3 , 1 , 2 ).contiguous() if self.remap is not None: lowercase__ : Dict = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis lowercase__ : int = self.remap_to_used(SCREAMING_SNAKE_CASE ) lowercase__ : List[str] = min_encoding_indices.reshape(-1 , 1 ) # flatten if self.sane_index_shape: lowercase__ : List[str] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] ): # shape specifying (batch, height, width, channel) if self.remap is not None: lowercase__ : Union[str, Any] = indices.reshape(shape[0] , -1 ) # add batch axis lowercase__ : Union[str, Any] = self.unmap_to_all(SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = indices.reshape(-1 ) # flatten again # get quantized latent vectors lowercase__ : List[Any] = self.embedding(SCREAMING_SNAKE_CASE ) if shape is not None: lowercase__ : Any = z_q.view(SCREAMING_SNAKE_CASE ) # reshape back to match original input shape lowercase__ : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous() return z_q class snake_case__(_UpperCamelCase ): """simple docstring""" def __init__( self : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : str=False ): lowercase__ : Dict = parameters lowercase__ , lowercase__ : Optional[int] = torch.chunk(SCREAMING_SNAKE_CASE , 2 , dim=1 ) lowercase__ : Optional[Any] = torch.clamp(self.logvar , -30.0 , 20.0 ) lowercase__ : Optional[int] = deterministic lowercase__ : Tuple = torch.exp(0.5 * self.logvar ) lowercase__ : Optional[int] = torch.exp(self.logvar ) if 
self.deterministic: lowercase__ : Any = torch.zeros_like( self.mean , device=self.parameters.device , dtype=self.parameters.dtype ) def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None ): # make sure sample is on the same device as the parameters and has same dtype lowercase__ : Tuple = randn_tensor( self.mean.shape , generator=SCREAMING_SNAKE_CASE , device=self.parameters.device , dtype=self.parameters.dtype ) lowercase__ : str = self.mean + self.std * sample return x def snake_case ( self : str , SCREAMING_SNAKE_CASE : List[str]=None ): if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , ) def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict=[1, 2, 3] ): if self.deterministic: return torch.Tensor([0.0] ) lowercase__ : Any = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=SCREAMING_SNAKE_CASE ) def snake_case ( self : Tuple ): return self.mean
81
1
def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" return str(lowerCamelCase__ ) == str(lowerCamelCase__ )[::-1] def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" return int(lowerCamelCase__ ) + int(str(lowerCamelCase__ )[::-1] ) def __lowerCamelCase ( lowerCamelCase__ = 10_000 ): """simple docstring""" lowercase__ : Union[str, Any] = [] for num in range(1 , lowerCamelCase__ ): lowercase__ : Any = 0 lowercase__ : List[str] = num while iterations < 50: lowercase__ : Optional[int] = sum_reverse(lowerCamelCase__ ) iterations += 1 if is_palindrome(lowerCamelCase__ ): break else: lychrel_nums.append(lowerCamelCase__ ) return len(lowerCamelCase__ ) if __name__ == "__main__": print(f'''{solution() = }''')
81
import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class snake_case__(_UpperCamelCase , unittest.TestCase ): """simple docstring""" lowercase_ = DiTPipeline lowercase_ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS lowercase_ = PipelineTesterMixin.required_optional_params - { """latents""", """num_images_per_prompt""", """callback""", """callback_steps""", } lowercase_ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS lowercase_ = False def snake_case ( self : int ): torch.manual_seed(0 ) lowercase__ : Optional[Any] = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=SCREAMING_SNAKE_CASE , activation_fn="gelu-approximate" , num_embeds_ada_norm=1_000 , norm_type="ada_norm_zero" , norm_elementwise_affine=SCREAMING_SNAKE_CASE , ) lowercase__ : Dict = AutoencoderKL() lowercase__ : Any = DDIMScheduler() lowercase__ : int = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler} return components def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int=0 ): if str(SCREAMING_SNAKE_CASE ).startswith("mps" ): lowercase__ : Union[str, Any] = torch.manual_seed(SCREAMING_SNAKE_CASE ) else: lowercase__ : List[str] = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE ) lowercase__ : int = { "class_labels": [1], "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs 
def snake_case ( self : Any ): lowercase__ : List[Any] = "cpu" lowercase__ : str = self.get_dummy_components() lowercase__ : str = self.pipeline_class(**SCREAMING_SNAKE_CASE ) pipe.to(SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE ) lowercase__ : Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE ) lowercase__ : str = pipe(**SCREAMING_SNAKE_CASE ).images lowercase__ : Tuple = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) lowercase__ : Tuple = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] ) lowercase__ : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(SCREAMING_SNAKE_CASE , 1E-3 ) def snake_case ( self : str ): self._test_inference_batch_single_identical(relax_max_difference=SCREAMING_SNAKE_CASE , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def snake_case ( self : Tuple ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class snake_case__(unittest.TestCase ): """simple docstring""" def snake_case ( self : int ): super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case ( self : str ): lowercase__ : List[Any] = torch.manual_seed(0 ) lowercase__ : Dict = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" ) pipe.to("cuda" ) lowercase__ : Tuple = ["vase", "umbrella", "white shark", "white wolf"] lowercase__ : Optional[Any] = pipe.get_label_ids(SCREAMING_SNAKE_CASE ) lowercase__ : Dict = pipe(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type="np" ).images for word, image in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): lowercase__ : Optional[Any] = load_numpy( f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" ) 
assert np.abs((expected_image - image).max() ) < 1E-2 def snake_case ( self : Union[str, Any] ): lowercase__ : int = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" ) lowercase__ : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("cuda" ) lowercase__ : Dict = ["vase", "umbrella"] lowercase__ : Any = pipe.get_label_ids(SCREAMING_SNAKE_CASE ) lowercase__ : List[str] = torch.manual_seed(0 ) lowercase__ : str = pipe(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type="np" ).images for word, image in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): lowercase__ : Optional[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" f"""/dit/{word}_512.npy""" ) assert np.abs((expected_image - image).max() ) < 1E-1
81
1
def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" lowercase__ : Any = abs(lowerCamelCase__ ) lowercase__ : Any = 0 while n > 0: res += n % 10 n //= 10 return res def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" lowercase__ : List[str] = abs(lowerCamelCase__ ) return n if n < 10 else n % 10 + sum_of_digits(n // 10 ) def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" return sum(int(lowerCamelCase__ ) for c in str(abs(lowerCamelCase__ ) ) ) def __lowerCamelCase ( ): """simple docstring""" from collections.abc import Callable from timeit import timeit def benchmark_a_function(lowerCamelCase__ , lowerCamelCase__ ) -> None: lowercase__ : List[str] = F"""{func.__name__}({value})""" lowercase__ : Tuple = timeit(F"""__main__.{call}""" , setup="import __main__" ) print(F"""{call:56} = {func(lowerCamelCase__ )} -- {timing:.4f} seconds""" ) for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376): for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact): benchmark_a_function(lowerCamelCase__ , lowerCamelCase__ ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
81
import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class snake_case__(_UpperCamelCase ): """simple docstring""" lowercase_ = (CMStochasticIterativeScheduler,) lowercase_ = 1_0 def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Any ): lowercase__ : Any = { "num_train_timesteps": 201, "sigma_min": 0.002, "sigma_max": 80.0, } config.update(**SCREAMING_SNAKE_CASE ) return config def snake_case ( self : Optional[int] ): lowercase__ : Tuple = 10 lowercase__ : List[Any] = self.get_scheduler_config() lowercase__ : Optional[Any] = self.scheduler_classes[0](**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) lowercase__ : Any = scheduler.timesteps[0] lowercase__ : Optional[int] = scheduler.timesteps[1] lowercase__ : List[Any] = self.dummy_sample lowercase__ : Tuple = 0.1 * sample lowercase__ : Tuple = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample lowercase__ : Any = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def snake_case ( self : Dict ): for timesteps in [10, 50, 100, 1_000]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE ) def snake_case ( self : str ): for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=SCREAMING_SNAKE_CASE ) def snake_case ( self : str ): lowercase__ : Any = self.scheduler_classes[0] lowercase__ : List[Any] = self.get_scheduler_config() lowercase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE ) lowercase__ : Any = 1 scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) lowercase__ : List[Any] = scheduler.timesteps lowercase__ : Optional[int] = torch.manual_seed(0 ) lowercase__ : List[str] = self.dummy_model() lowercase__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(SCREAMING_SNAKE_CASE ): # 1. 
scale model input lowercase__ : Tuple = scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # 2. predict noise residual lowercase__ : Dict = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # 3. predict previous sample x_t-1 lowercase__ : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample lowercase__ : Dict = pred_prev_sample lowercase__ : List[Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) ) lowercase__ : Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 192.7_614 ) < 1E-2 assert abs(result_mean.item() - 0.2_510 ) < 1E-3 def snake_case ( self : Union[str, Any] ): lowercase__ : Optional[int] = self.scheduler_classes[0] lowercase__ : Tuple = self.get_scheduler_config() lowercase__ : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = [106, 0] scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = scheduler.timesteps lowercase__ : Optional[int] = torch.manual_seed(0 ) lowercase__ : Optional[int] = self.dummy_model() lowercase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input lowercase__ : Optional[Any] = scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # 2. predict noise residual lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # 3. 
predict previous sample x_t-1 lowercase__ : Tuple = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample lowercase__ : Union[str, Any] = pred_prev_sample lowercase__ : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) ) lowercase__ : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 347.6_357 ) < 1E-2 assert abs(result_mean.item() - 0.4_527 ) < 1E-3 def snake_case ( self : Optional[int] ): lowercase__ : Union[str, Any] = self.scheduler_classes[0] lowercase__ : str = self.get_scheduler_config() lowercase__ : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE ) lowercase__ : int = [39, 30, 12, 15, 0] with self.assertRaises(SCREAMING_SNAKE_CASE , msg="`timesteps` must be in descending order." ): scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE ) def snake_case ( self : Union[str, Any] ): lowercase__ : List[str] = self.scheduler_classes[0] lowercase__ : Dict = self.get_scheduler_config() lowercase__ : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE ) lowercase__ : Union[str, Any] = [39, 30, 12, 1, 0] lowercase__ : Tuple = len(SCREAMING_SNAKE_CASE ) with self.assertRaises(SCREAMING_SNAKE_CASE , msg="Can only pass one of `num_inference_steps` or `timesteps`." ): scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE ) def snake_case ( self : Optional[Any] ): lowercase__ : List[str] = self.scheduler_classes[0] lowercase__ : List[Any] = self.get_scheduler_config() lowercase__ : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE ) lowercase__ : Tuple = [scheduler.config.num_train_timesteps] with self.assertRaises( SCREAMING_SNAKE_CASE , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ): scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE )
81
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available lowerCAmelCase__ = {'''tokenization_herbert''': ['''HerbertTokenizer''']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''HerbertTokenizerFast'''] if TYPE_CHECKING: from .tokenization_herbert import HerbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_herbert_fast import HerbertTokenizerFast else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
81
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import ( CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common, get_velocity_common, ) @flax.struct.dataclass class snake_case__: """simple docstring""" lowercase_ = 42 # setable values lowercase_ = 42 lowercase_ = 42 lowercase_ = None @classmethod def snake_case ( cls : Union[str, Any] , SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ): return cls(common=SCREAMING_SNAKE_CASE , init_noise_sigma=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE ) @dataclass class snake_case__(_UpperCamelCase ): """simple docstring""" lowercase_ = 42 class snake_case__(_UpperCamelCase , _UpperCamelCase ): """simple docstring""" lowercase_ = [e.name for e in FlaxKarrasDiffusionSchedulers] lowercase_ = 42 @property def snake_case ( self : Dict ): return True @register_to_config def __init__( self : Dict , SCREAMING_SNAKE_CASE : int = 1_000 , SCREAMING_SNAKE_CASE : float = 0.0_001 , SCREAMING_SNAKE_CASE : float = 0.02 , SCREAMING_SNAKE_CASE : str = "linear" , SCREAMING_SNAKE_CASE : Optional[jnp.ndarray] = None , SCREAMING_SNAKE_CASE : str = "fixed_small" , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : str = "epsilon" , SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa , ): lowercase__ : List[Any] = dtype def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : Optional[CommonSchedulerState] = None ): if common is None: lowercase__ : Dict = CommonSchedulerState.create(self ) # standard deviation of the initial noise distribution lowercase__ : Dict = jnp.array(1.0 , dtype=self.dtype ) lowercase__ : Dict = jnp.arange(0 , self.config.num_train_timesteps 
).round()[::-1] return DDPMSchedulerState.create( common=SCREAMING_SNAKE_CASE , init_noise_sigma=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE , ) def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Optional[int] = None ): return sample def snake_case ( self : int , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple = () ): lowercase__ : Any = self.config.num_train_timesteps // num_inference_steps # creates integer timesteps by multiplying by ratio # rounding to avoid issues when num_inference_step is power of 3 lowercase__ : Union[str, Any] = (jnp.arange(0 , SCREAMING_SNAKE_CASE ) * step_ratio).round()[::-1] return state.replace( num_inference_steps=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE , ) def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : List[Any]=None ): lowercase__ : Tuple = state.common.alphas_cumprod[t] lowercase__ : Any = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample lowercase__ : str = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t] if variance_type is None: lowercase__ : Dict = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small": lowercase__ : Union[str, Any] = jnp.clip(SCREAMING_SNAKE_CASE , a_min=1E-2_0 ) # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": lowercase__ : Optional[int] = jnp.log(jnp.clip(SCREAMING_SNAKE_CASE , a_min=1E-2_0 ) ) elif variance_type == "fixed_large": 
lowercase__ : Union[str, Any] = state.common.betas[t] elif variance_type == "fixed_large_log": # Glide max_log lowercase__ : List[Any] = jnp.log(state.common.betas[t] ) elif variance_type == "learned": return predicted_variance elif variance_type == "learned_range": lowercase__ : List[Any] = variance lowercase__ : Union[str, Any] = state.common.betas[t] lowercase__ : Tuple = (predicted_variance + 1) / 2 lowercase__ : Optional[Any] = frac * max_log + (1 - frac) * min_log return variance def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Optional[jax.random.KeyArray] = None , SCREAMING_SNAKE_CASE : bool = True , ): lowercase__ : Tuple = timestep if key is None: lowercase__ : Union[str, Any] = jax.random.PRNGKey(0 ) if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]: lowercase__ , lowercase__ : str = jnp.split(SCREAMING_SNAKE_CASE , sample.shape[1] , axis=1 ) else: lowercase__ : Any = None # 1. compute alphas, betas lowercase__ : Dict = state.common.alphas_cumprod[t] lowercase__ : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) lowercase__ : Optional[Any] = 1 - alpha_prod_t lowercase__ : Optional[int] = 1 - alpha_prod_t_prev # 2. 
compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": lowercase__ : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": lowercase__ : Optional[Any] = model_output elif self.config.prediction_type == "v_prediction": lowercase__ : Optional[Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output else: raise ValueError( f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """ " for the FlaxDDPMScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: lowercase__ : List[Any] = jnp.clip(SCREAMING_SNAKE_CASE , -1 , 1 ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf lowercase__ : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t lowercase__ : str = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf lowercase__ : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise def random_variance(): lowercase__ : Any = jax.random.split(SCREAMING_SNAKE_CASE , num=1 ) lowercase__ : Any = jax.random.normal(SCREAMING_SNAKE_CASE , shape=model_output.shape , dtype=self.dtype ) return (self._get_variance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , predicted_variance=SCREAMING_SNAKE_CASE ) ** 0.5) * noise lowercase__ : Optional[Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) ) lowercase__ : Optional[int] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample, state) return FlaxDDPMSchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE , state=SCREAMING_SNAKE_CASE ) def snake_case ( self : Any , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , ): return add_noise_common(state.common , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , ): return get_velocity_common(state.common , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __len__( self : Tuple ): return self.config.num_train_timesteps
81
1
import torch

from diffusers import UnCLIPScheduler

from .test_schedulers import SchedulerCommonTest


class UnCLIPSchedulerTest(SchedulerCommonTest):
    """Unit tests for ``UnCLIPScheduler``.

    NOTE(review): the original block was machine-obfuscated -- the class was
    named ``snake_case__`` with an undefined base ``_UpperCamelCase``, and
    every method was named ``snake_case`` so each definition shadowed the
    previous one. Names below are reconstructed from the bodies (which read
    ``self.scheduler_classes[0]`` and call ``check_over_configs`` /
    ``check_over_forward``); confirm against the upstream test suite.
    """

    # Class under test; read by SchedulerCommonTest helpers.
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Return a default scheduler config dict, overridden by ``kwargs``."""
        config = {
            "num_train_timesteps": 1_000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        # prev_timestep must strictly precede time_step; skip invalid pairs.
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        """Run the full denoising loop and pin the resulting tensor statistics."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        """Same as test_full_loop but with an explicitly shortened schedule."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # Last step has no successor timestep.
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        # Not applicable to UnCLIPScheduler; intentionally a no-op override.
        pass

    def test_add_noise_device(self):
        # Not applicable to UnCLIPScheduler; intentionally a no-op override.
        pass
81
from typing import Callable, List, Optional, Union

import PIL
import torch

from transformers import (
    CLIPImageProcessor,
    CLIPSegForImageSegmentation,
    CLIPSegProcessor,
    CLIPTextModel,
    CLIPTokenizer,
)

from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class TextInpainting(DiffusionPipeline):
    """Two-stage pipeline: CLIPSeg segments the region named by ``text`` into a
    mask, then Stable Diffusion inpaints that region from ``prompt``.

    NOTE(review): the original block was machine-obfuscated (duplicate
    ``SCREAMING_SNAKE_CASE`` parameter names, undefined base class). Parameter
    names below are reconstructed from the attribute accesses in the bodies
    (``self.segmentation_processor``, ``scheduler.config``, ...).
    """

    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        # Patch outdated scheduler configs in place (with a deprecation notice)
        # so downstream step indexing stays correct.
        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        """Compute attention in ``slice_size`` chunks to trade speed for memory."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """Restore full (un-sliced) attention computation."""
        # Setting slice_size to None re-enables computing attention in one step.
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        """Offload sub-models to CPU between forward passes (needs accelerate)."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """Device to place inputs on; honours accelerate's offload hooks."""
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Segment ``text`` in ``image`` with CLIPSeg, then inpaint with ``prompt``.

        Returns whatever ``StableDiffusionInpaintPipeline.__call__`` returns
        (a ``StableDiffusionPipelineOutput`` or a tuple when ``return_dict=False``).
        """
        # 1. Build the inpainting mask by segmenting the named region.
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # 2. Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
81
1
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}


class XmodConfig(PretrainedConfig):
    """Configuration for X-MOD models (RoBERTa with language-specific adapters).

    NOTE(review): the original block was machine-obfuscated (the archive map
    shadowed the logger; every local/parameter collapsed to one name).
    Parameter names are reconstructed from the attribute assignments.
    """

    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Standard transformer-encoder hyperparameters.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # X-MOD-specific adapter settings.
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    """ONNX export configuration for X-MOD."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
81
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms

from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


# NOTE(review): the original block was machine-obfuscated -- all three
# functions were named `__lowerCamelCase`, shadowing one another, while the
# body called `get_focalnet_config` / `convert_focalnet_checkpoint` by their
# real names. Names are restored so the script actually runs.
def get_focalnet_config(model_name):
    """Build a ``FocalNetConfig`` matching the named original checkpoint."""
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config


def rename_key(name):
    """Map an original FocalNet state-dict key to the HF naming scheme."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name


def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Download an original FocalNet checkpoint, convert, verify, and save it."""
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    expected_slice = None
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    # Only the models above ship reference slices; skip the check otherwise
    # instead of failing on an undefined expected value.
    if expected_slice is not None:
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )

    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
81
1
from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance

# WGS-84 ellipsoid parameters. NOTE(review): the original block was
# machine-obfuscated -- all three constants shared one name, so AXIS_A /
# AXIS_B / EQUATORIAL_RADIUS referenced below were never defined. Restored.
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
EQUATORIAL_RADIUS = 6_378_137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Approximate the geodesic distance in meters between two points on Earth
    using Lambert's formula on the WGS-84 ellipsoid.

    Coordinates are in decimal degrees. The central angle is derived from the
    haversine distance divided by the equatorial radius.
    """
    # Equatorial flattening of the ellipsoid.
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
81
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    """Configuration for Informer time-series transformer models.

    NOTE(review): the original block was machine-obfuscated (archive map
    shadowed the logger; all parameters collapsed to one name). Names are
    reconstructed from the attribute assignments in the body.
    """

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        # Default the encoder context window to the prediction horizon.
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            # Heuristic default: half the category count, capped at 50.
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer-specific attention settings.
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Number of extra input features beyond the lagged target values."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
81
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    """Configuration for a CvT (Convolutional vision Transformer) model.

    Each list-valued argument holds one entry per stage of the three-stage model.

    NOTE(review): the obfuscated original gave every ``__init__`` parameter the
    same placeholder name (a duplicate-argument SyntaxError) and bound the values
    to throwaway locals. The parameter names below are recovered from the
    identifiers the original body already referenced on its right-hand sides,
    and the values are stored on ``self`` as ``PretrainedConfig`` subclasses
    require. The base class is restored to the otherwise-unused import
    ``PretrainedConfig`` (the original referenced an undefined ``_UpperCamelCase``).
    """

    # Conventional PretrainedConfig model identifier — TODO confirm attribute
    # name against upstream; the original bound the string to a placeholder.
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,  # input image channels
        patch_sizes=[7, 3, 3],  # conv-embedding kernel size, one per stage
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],  # only the last stage adds a CLS token
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        """Build the config; extra keyword arguments are forwarded to the base class.

        The list defaults are treated as read-only and are kept as-is to
        preserve the upstream call signature, even though mutable defaults are
        normally avoided.
        """
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
81
"""Convert old-structure (XLM-)ProphetNet checkpoints to the current 🤗 Transformers layout.

NOTE(review): the obfuscated original was not runnable — the conversion function
had two parameters with the same placeholder name (a SyntaxError), every local
was bound to ``lowercase__`` while later statements referenced the real names
(NameErrors), and the ``__main__`` block called a function name that was never
defined. All names below are recovered from the identifiers the original body
already referenced. The two shape checks in the ``special_keys`` branch were
bare tuple expressions (no-ops); they are restored to real ``assert``s.
"""
import argparse

from torch import nn

# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
    ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
    XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)

from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging


logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """Load an old-structure checkpoint, copy its weights into the new model, and save it.

    Args:
        prophetnet_checkpoint_path: path of the old checkpoint; paths containing
            ``"xprophetnet"`` are treated as XLM-ProphetNet.
        pytorch_dump_folder_path: output folder for ``save_pretrained``.

    Raises:
        ValueError: if a missing key cannot be mapped onto the old model.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    # Fused qkv projections in the old model that must be split into three new ones.
    special_keys = ["key_proj", "value_proj", "query_proj"]

    # new attribute name -> old attribute name ("" means "stay on the same module").
    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                # Empty mapping entries mean the old model had no wrapper module here.
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(F"""{attribute} is initialized.""")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(F"""{attribute} is initialized""")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # The old fused in_proj holds q, k and v stacked along dim 0.
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                # Restored asserts — these were no-op tuple expressions in the original.
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            # Descend one level into both models and continue with the next attribute.
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(old_attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(F"""{old_model} does not have {old_attribute}""")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(F"""{key} was not correctly initialized!""")

    print(F"""Saving model to {pytorch_dump_folder_path}""")
    prophet.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
81
1
"""Tests for the M2M100 (``MaMaaa``) tokenizer.

NOTE(review): this module is obfuscated output and is not runnable as-is:
``SCREAMING_SNAKE_CASE`` is referenced in many call sites without being
defined, every local is bound to the throwaway name ``lowercase__`` while later
statements reference the intended names, the ``lowercase_`` class attributes
rebind one another (only the last survives) although methods read
``self.checkpoint_name`` / ``self.src_text`` / ``self.tgt_text`` /
``self.expected_src_tokens``, and the first class inherits the undefined
``_UpperCamelCase`` (presumably the imported, otherwise-unused
``TokenizerTesterMixin`` — verify against upstream). Restoring behavior
requires the upstream source; only comments are added here.
"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
)
from transformers.utils import is_sentencepiece_available

if is_sentencepiece_available():
    from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin

if is_sentencepiece_available():
    lowerCAmelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''')

if is_torch_available():
    from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right

# NOTE(review): methods below reference EN_CODE and FR_CODE; these two rebound
# constants appear to be their intended definitions — confirm against upstream.
lowerCAmelCase__ = 1_2_8_0_2_2
lowerCAmelCase__ = 1_2_8_0_2_8


@require_sentencepiece
class snake_case__(_UpperCamelCase , unittest.TestCase ):
    """Unit tests exercising the slow MaMaaa tokenizer through the common tester mixin."""

    lowercase_ = MaMaaaTokenizer
    lowercase_ = False
    lowercase_ = False
    lowercase_ = True

    def snake_case ( self : str ):
        # setUp: write a tiny vocab + sentencepiece model into tmpdirname.
        super().setUp()
        lowercase__ : Union[str, Any] = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        lowercase__ : Dict = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
        lowercase__ : Dict = Path(self.tmpdirname )
        save_json(SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES["spm_file"] )
        lowercase__ : Optional[int] = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )

    def snake_case ( self : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ):
        # Factory used by the mixin to build a tokenizer from the fixture dir.
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )

    def snake_case ( self : int , SCREAMING_SNAKE_CASE : Any ):
        # (input_text, expected_output_text) pair for round-trip checks.
        return (
            "This is a test",
            "This is a test",
        )

    def snake_case ( self : Dict ):
        # token <-> id conversion for the EOS token (id 0 in the tiny vocab).
        lowercase__ : Any = "</s>"
        lowercase__ : Any = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )

    def snake_case ( self : Dict ):
        # Vocabulary ordering and size sanity checks.
        lowercase__ : List[str] = self.get_tokenizer()
        lowercase__ : Tuple = list(tokenizer.get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "</s>" )
        self.assertEqual(vocab_keys[1] , "<unk>" )
        self.assertEqual(vocab_keys[-1] , "<s>" )
        self.assertEqual(len(SCREAMING_SNAKE_CASE ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )

    @unittest.skip("Skip this test while all models are still to be uploaded." )
    def snake_case ( self : Any ):
        pass

    def snake_case ( self : Any ):
        # Full tokenize -> ids -> tokens -> string round trip on a toy sentence.
        lowercase__ : List[Any] = self.get_tokenizer()
        lowercase__ : Optional[int] = tokenizer.tokenize("This is a test" )
        self.assertListEqual(SCREAMING_SNAKE_CASE , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , [2, 3, 4, 5, 6] , )
        lowercase__ : Optional[Any] = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
        self.assertListEqual(SCREAMING_SNAKE_CASE , ["▁This", "▁is", "▁a", "▁t", "est"] )
        lowercase__ : List[str] = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE )
        self.assertEqual(SCREAMING_SNAKE_CASE , "This is a test" )

    @slow
    def snake_case ( self : List[str] ):
        # Integration fixture: expected encoding for a pinned model revision.
        # fmt: off
        lowercase__ : List[str] = {"input_ids": [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )


@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__(unittest.TestCase ):
    """Integration tests against the real facebook/m2m100_418M checkpoint (en -> fr)."""

    # NOTE(review): these four attributes clobber each other; methods below read
    # self.checkpoint_name / self.src_text / self.tgt_text / self.expected_src_tokens.
    lowercase_ = """facebook/m2m100_418M"""
    lowercase_ = [
        """In my opinion, there are two levels of response from the French government.""",
        """NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
    ]
    lowercase_ = [
        """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
        """L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
    ]
    # fmt: off
    lowercase_ = [EN_CODE, 5_9_3, 1_9_4_9, 1_1_5_7_8_1, 4, 7_1_5_8_6, 4_2_3_4, 6_0_6_3_3, 1_2_6_2_3_3, 4_3_2, 1_2_3_8_0_8, 1_5_5_9_2, 1_1_9_7, 1_1_7_1_3_2, 1_2_0_6_1_8, 5, 2]
    # fmt: on

    @classmethod
    def snake_case ( cls : Optional[int] ):
        # setUpClass: load the shared tokenizer once for all tests.
        lowercase__ : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
        lowercase__ : Optional[int] = 1
        return cls

    def snake_case ( self : Tuple ):
        # Language codes map to fixed ids.
        self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 128_006 )
        self.assertEqual(self.tokenizer.get_lang_id("en" ) , 128_022 )
        self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 128_076 )
        self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 128_063 )

    def snake_case ( self : int ):
        # Vocabulary completeness checks.
        lowercase__ : List[str] = self.tokenizer.get_vocab()
        self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.tokenizer.vocab_size )
        self.assertEqual(vocab["<unk>"] , 3 )
        self.assertIn(self.tokenizer.get_lang_token("en" ) , SCREAMING_SNAKE_CASE )

    def snake_case ( self : Optional[int] ):
        # Source-side encoding matches the pinned expected token ids.
        lowercase__ : int = "en"
        lowercase__ : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE )

    def snake_case ( self : Optional[int] ):
        # Decoding drops special tokens (language code and EOS).
        self.assertIn(SCREAMING_SNAKE_CASE , self.tokenizer.all_special_ids )
        # fmt: off
        lowercase__ : Optional[int] = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2]
        # fmt: on
        lowercase__ : Any = self.tokenizer.decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
        lowercase__ : Tuple = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=SCREAMING_SNAKE_CASE )
        self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        self.assertNotIn(self.tokenizer.eos_token , SCREAMING_SNAKE_CASE )

    def snake_case ( self : Dict ):
        # Saving and reloading preserves the language-token table.
        lowercase__ : Any = tempfile.mkdtemp()
        lowercase__ : List[str] = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
        lowercase__ : Dict = MaMaaaTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
        self.assertDictEqual(new_tok.lang_token_to_id , SCREAMING_SNAKE_CASE )

    @require_torch
    def snake_case ( self : Tuple ):
        # Seq2seq batch layout: language codes lead inputs/labels, EOS trails.
        lowercase__ : List[Any] = "en"
        lowercase__ : Dict = "fr"
        lowercase__ : Any = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE , return_tensors="pt" )
        lowercase__ : str = shift_tokens_right(
            batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
        for k in batch:
            lowercase__ : Optional[Any] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def snake_case ( self : Tuple ):
        # Changing src_lang updates the prefix/suffix special tokens.
        lowercase__ : Dict = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        lowercase__ : Optional[int] = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )

    @require_torch
    def snake_case ( self : str ):
        # Target-mode switching swaps the language prefix between tgt and src.
        lowercase__ : str = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
        lowercase__ : str = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )

    @require_torch
    def snake_case ( self : Union[str, Any] ):
        # _build_translation_inputs adds the language code and forced BOS id.
        lowercase__ : str = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE ) , {
                # en_XX, A, test, EOS
                "input_ids": [[128_022, 58, 4_183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128_006,
            } , )
81
"""Tests for the GPT-2 tokenizer (slow and fast) plus OPT tokenizer regressions.

NOTE(review): this module is obfuscated output and is not runnable as-is:
``SCREAMING_SNAKE_CASE`` is referenced in many call sites without being defined,
locals are bound to the throwaway name ``lowercase__`` while later statements
reference the intended names, the ``lowercase_`` class attributes rebind one
another (methods read ``self.rust_tokenizer_class`` / ``self.test_rust_tokenizer``
etc.), the first class inherits the undefined ``_UpperCamelCase`` (presumably the
imported, otherwise-unused ``TokenizerTesterMixin`` — verify), and one method
below has two varargs parameters with the same name, which is a SyntaxError.
Restoring behavior requires the upstream source; only comments are added here.
"""
import json
import os
import unittest

from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class snake_case__(_UpperCamelCase , unittest.TestCase ):
    """GPT-2 tokenizer tests driven by the common tokenizer tester mixin."""

    lowercase_ = GPTaTokenizer
    lowercase_ = GPTaTokenizerFast
    lowercase_ = True
    lowercase_ = {"""add_prefix_space""": True}
    lowercase_ = False

    def snake_case ( self : Any ):
        # setUp: write a tiny BPE vocab + merges file into tmpdirname.
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        lowercase__ : Union[str, Any] = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        lowercase__ : Optional[Any] = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
        lowercase__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        lowercase__ : List[str] = {"unk_token": "<unk>"}
        lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        lowercase__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(SCREAMING_SNAKE_CASE ) )

    def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : int ):
        # Slow-tokenizer factory for the mixin.
        # NOTE(review): body references `kwargs`, which the obfuscated signature renamed.
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )

    def snake_case ( self : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
        # Fast-tokenizer factory for the mixin.
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )

    def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Dict ):
        # (input_text, expected_output_text) pair for round-trip checks.
        lowercase__ : List[str] = "lower newer"
        lowercase__ : Optional[Any] = "lower newer"
        return input_text, output_text

    def snake_case ( self : Any ):
        # Slow tokenizer: tokenize + convert_tokens_to_ids on a toy sentence.
        lowercase__ : Dict = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        lowercase__ : Dict = "lower newer"
        lowercase__ : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        lowercase__ : Optional[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
        self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        lowercase__ : Any = tokens + [tokenizer.unk_token]
        lowercase__ : str = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )

    def snake_case ( self : Optional[Any] ):
        # Slow and fast tokenizers must agree on tokens and ids.
        if not self.test_rust_tokenizer:
            return
        lowercase__ : Dict = self.get_tokenizer()
        lowercase__ : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE )
        lowercase__ : int = "lower newer"
        # Testing tokenization
        lowercase__ : str = tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
        lowercase__ : int = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE )
        self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        # Testing conversion to ids without special tokens
        lowercase__ : Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
        lowercase__ : Dict = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
        self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        # Testing conversion to ids with special tokens
        lowercase__ : List[str] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE )
        lowercase__ : List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[int] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE )
        self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        # Testing the unknown token
        lowercase__ : List[Any] = tokens + [rust_tokenizer.unk_token]
        lowercase__ : Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )

    # NOTE(review): duplicate parameter name for * and ** below is a SyntaxError
    # in the obfuscated original; upstream uses *args, **kwargs.
    def snake_case ( self : str , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : int=15 ):
        # Encoding without a pad token must raise when padding is requested.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                lowercase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
                # Simple input
                lowercase__ : Dict = "This is a simple input"
                lowercase__ : List[str] = ["This is a simple input 1", "This is a simple input 2"]
                lowercase__ : Union[str, Any] = ("This is a simple input", "This is a pair")
                lowercase__ : Optional[int] = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
                # Simple input
                self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
                # Simple input
                self.assertRaises(
                    SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , )
                # Pair input
                self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
                # Pair input
                self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
                # Pair input
                self.assertRaises(
                    SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , )

    def snake_case ( self : Any ):
        # Padding behavior for single strings and pairs with an explicit pad token.
        lowercase__ : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
        # Simple input
        lowercase__ : Optional[int] = "This is a simple input"
        lowercase__ : List[str] = ["This is a simple input looooooooong", "This is a simple input"]
        lowercase__ : List[Any] = ("This is a simple input", "This is a pair")
        lowercase__ : Optional[Any] = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]
        lowercase__ : Any = tokenizer.pad_token_id
        lowercase__ : Dict = tokenizer(SCREAMING_SNAKE_CASE , padding="max_length" , max_length=30 , return_tensors="np" )
        lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="np" )
        lowercase__ : List[str] = tokenizer(*SCREAMING_SNAKE_CASE , padding="max_length" , max_length=60 , return_tensors="np" )
        lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="np" )
        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
        self.assertTrue(pad_token_id in out_s["input_ids"] )
        self.assertTrue(0 in out_s["attention_mask"] )
        # s2
        # test automatic padding
        self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
        self.assertFalse(0 in out_sa["attention_mask"][0] )
        # short slice does have padding
        self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
        self.assertTrue(0 in out_sa["attention_mask"][1] )
        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
        self.assertTrue(pad_token_id in out_p["input_ids"] )
        self.assertTrue(0 in out_p["attention_mask"] )
        # p2
        # test automatic padding pair
        self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
        self.assertFalse(0 in out_pa["attention_mask"][0] )
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
        self.assertTrue(0 in out_pa["attention_mask"][1] )

    def snake_case ( self : str ):
        # Custom BOS token is prepended when add_bos_token is set.
        lowercase__ : List[str] = "$$$"
        lowercase__ : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[int] = "This is a simple input"
        lowercase__ : Dict = ["This is a simple input 1", "This is a simple input 2"]
        lowercase__ : Optional[int] = tokenizer.bos_token_id
        lowercase__ : List[Any] = tokenizer(SCREAMING_SNAKE_CASE )
        lowercase__ : int = tokenizer(SCREAMING_SNAKE_CASE )
        self.assertEqual(out_s.input_ids[0] , SCREAMING_SNAKE_CASE )
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
        lowercase__ : List[Any] = tokenizer.decode(out_s.input_ids )
        lowercase__ : List[str] = tokenizer.batch_decode(out_sa.input_ids )
        self.assertEqual(decode_s.split()[0] , SCREAMING_SNAKE_CASE )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )

    def snake_case ( self : Optional[int] ):
        pass

    def snake_case ( self : Tuple ):
        # special_tokens_mask must line up with the encoded sequence pair.
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        lowercase__ : int = [self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE )]
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                lowercase__ : str = "Encode this."
                lowercase__ : List[Any] = "This one too please."
                lowercase__ : Dict = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
                encoded_sequence += tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
                lowercase__ : Dict = tokenizer.encode_plus(
                    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_special_tokens_mask=SCREAMING_SNAKE_CASE , )
                lowercase__ : Tuple = encoded_sequence_dict["input_ids"]
                lowercase__ : int = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) )
                lowercase__ : List[str] = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(SCREAMING_SNAKE_CASE )
                ]
                lowercase__ : Any = [x for x in filtered_sequence if x is not None]
                self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )


@require_tokenizers
class snake_case__(unittest.TestCase ):
    """Regression tests for the OPT tokenizer (serialization, slow/fast parity, BOS)."""

    def snake_case ( self : Union[str, Any] ):
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[Any] = "A photo of a cat"
        lowercase__ : Tuple = tokenizer.encode(
            SCREAMING_SNAKE_CASE , )
        self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
        tokenizer.save_pretrained("test_opt" )
        lowercase__ : int = AutoTokenizer.from_pretrained("./test_opt" )
        lowercase__ : Dict = tokenizer.encode(
            SCREAMING_SNAKE_CASE , )
        self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )

    def snake_case ( self : Union[str, Any] ):
        # Slow tokenizer must produce the same ids as the fast one above.
        lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=SCREAMING_SNAKE_CASE )
        lowercase__ : int = "A photo of a cat"
        lowercase__ : Tuple = tokenizer.encode(
            SCREAMING_SNAKE_CASE , )
        # Same as above
        self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )

    @unittest.skip("This test is failing because of a bug in the fast tokenizer" )
    def snake_case ( self : Tuple ):
        # Users can override the BOS token and round-trip through save/load.
        lowercase__ : str = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[Any] = "bos"
        lowercase__ : List[Any] = tokenizer.get_vocab()["bos"]
        lowercase__ : Optional[Any] = "A photo of a cat"
        lowercase__ : Union[str, Any] = tokenizer.encode(
            SCREAMING_SNAKE_CASE , )
        # We changed the bos token
        self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
        tokenizer.save_pretrained("./tok" )
        lowercase__ : Any = AutoTokenizer.from_pretrained("./tok" )
        self.assertTrue(tokenizer.is_fast )
        lowercase__ : Tuple = tokenizer.encode(
            SCREAMING_SNAKE_CASE , )
        self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
81
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json''', '''google/bigbird-roberta-large''': '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json''', '''google/bigbird-base-trivia-itc''': '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json''', # See all BigBird models at https://huggingface.co/models?filter=big_bird } class snake_case__(_UpperCamelCase ): """simple docstring""" lowercase_ = """big_bird""" def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any]=50_358 , SCREAMING_SNAKE_CASE : Optional[int]=768 , SCREAMING_SNAKE_CASE : int=12 , SCREAMING_SNAKE_CASE : Optional[Any]=12 , SCREAMING_SNAKE_CASE : Optional[int]=3_072 , SCREAMING_SNAKE_CASE : List[Any]="gelu_new" , SCREAMING_SNAKE_CASE : Any=0.1 , SCREAMING_SNAKE_CASE : Dict=0.1 , SCREAMING_SNAKE_CASE : str=4_096 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : int=0.02 , SCREAMING_SNAKE_CASE : Optional[int]=1E-1_2 , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Any=0 , SCREAMING_SNAKE_CASE : Optional[Any]=1 , SCREAMING_SNAKE_CASE : Optional[int]=2 , SCREAMING_SNAKE_CASE : List[str]=66 , SCREAMING_SNAKE_CASE : List[str]="block_sparse" , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : Union[str, Any]=False , SCREAMING_SNAKE_CASE : Dict=64 , SCREAMING_SNAKE_CASE : List[str]=3 , SCREAMING_SNAKE_CASE : Any=None , **SCREAMING_SNAKE_CASE : List[Any] , ): super().__init__( pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , sep_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) lowercase__ : Any = vocab_size lowercase__ : List[str] = 
max_position_embeddings lowercase__ : List[Any] = hidden_size lowercase__ : str = num_hidden_layers lowercase__ : Tuple = num_attention_heads lowercase__ : Union[str, Any] = intermediate_size lowercase__ : Optional[int] = hidden_act lowercase__ : str = hidden_dropout_prob lowercase__ : Union[str, Any] = attention_probs_dropout_prob lowercase__ : Union[str, Any] = initializer_range lowercase__ : Any = type_vocab_size lowercase__ : Tuple = layer_norm_eps lowercase__ : Tuple = use_cache lowercase__ : Any = rescale_embeddings lowercase__ : int = attention_type lowercase__ : Tuple = use_bias lowercase__ : int = block_size lowercase__ : Optional[int] = num_random_blocks lowercase__ : Any = classifier_dropout class snake_case__(_UpperCamelCase ): """simple docstring""" @property def snake_case ( self : Tuple ): if self.task == "multiple-choice": lowercase__ : int = {0: "batch", 1: "choice", 2: "sequence"} else: lowercase__ : int = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
81
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ = { '''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimesformerModel''', '''TimesformerForVideoClassification''', '''TimesformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timesformer import ( TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimesformerForVideoClassification, TimesformerModel, TimesformerPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
81
1
import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. lowerCAmelCase__ = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''} @is_pipeline_test class snake_case__(unittest.TestCase ): """simple docstring""" lowercase_ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING lowercase_ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: lowercase_ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: lowercase_ = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def snake_case ( self : int ): lowercase__ : List[str] = pipeline( task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" ) lowercase__ : Optional[int] = text_classifier("This is great !" ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE ) , [{"label": "LABEL_0", "score": 0.504}] ) lowercase__ : int = text_classifier("This is great !" , top_k=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE ) , [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}] ) lowercase__ : List[Any] = text_classifier(["This is great !", "This is bad"] , top_k=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE ) , [ [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], ] , ) lowercase__ : List[Any] = text_classifier("This is great !" 
, top_k=1 ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE ) , [{"label": "LABEL_0", "score": 0.504}] ) # Legacy behavior lowercase__ : int = text_classifier("This is great !" , return_all_scores=SCREAMING_SNAKE_CASE ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE ) , [{"label": "LABEL_0", "score": 0.504}] ) lowercase__ : Optional[int] = text_classifier("This is great !" , return_all_scores=SCREAMING_SNAKE_CASE ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE ) , [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]] ) lowercase__ : Tuple = text_classifier(["This is great !", "Something else"] , return_all_scores=SCREAMING_SNAKE_CASE ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE ) , [ [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], ] , ) lowercase__ : Optional[int] = text_classifier(["This is great !", "Something else"] , return_all_scores=SCREAMING_SNAKE_CASE ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE ) , [ {"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_0", "score": 0.504}, ] , ) @require_torch def snake_case ( self : List[Any] ): import torch lowercase__ : List[Any] = pipeline( task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" , device=torch.device("cpu" ) , ) lowercase__ : Any = text_classifier("This is great !" ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE ) , [{"label": "LABEL_0", "score": 0.504}] ) @require_tf def snake_case ( self : List[str] ): lowercase__ : Tuple = pipeline( task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="tf" ) lowercase__ : Union[str, Any] = text_classifier("This is great !" 
) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE ) , [{"label": "LABEL_0", "score": 0.504}] ) @slow @require_torch def snake_case ( self : Tuple ): lowercase__ : Optional[int] = pipeline("text-classification" ) lowercase__ : int = text_classifier("This is great !" ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE ) , [{"label": "POSITIVE", "score": 1.0}] ) lowercase__ : Any = text_classifier("This is bad !" ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE ) , [{"label": "NEGATIVE", "score": 1.0}] ) lowercase__ : str = text_classifier("Birds are a type of animal" ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE ) , [{"label": "POSITIVE", "score": 0.988}] ) @slow @require_tf def snake_case ( self : Optional[Any] ): lowercase__ : Optional[int] = pipeline("text-classification" , framework="tf" ) lowercase__ : List[Any] = text_classifier("This is great !" ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE ) , [{"label": "POSITIVE", "score": 1.0}] ) lowercase__ : Union[str, Any] = text_classifier("This is bad !" 
) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE ) , [{"label": "NEGATIVE", "score": 1.0}] ) lowercase__ : Optional[Any] = text_classifier("Birds are a type of animal" ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE ) , [{"label": "POSITIVE", "score": 0.988}] ) def snake_case ( self : int , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Tuple ): lowercase__ : Union[str, Any] = TextClassificationPipeline(model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE ) return text_classifier, ["HuggingFace is in", "This is another test"] def snake_case ( self : str , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple ): lowercase__ : Optional[Any] = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 lowercase__ : str = "HuggingFace is in" lowercase__ : str = text_classifier(SCREAMING_SNAKE_CASE ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE ) , [{"label": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE )}] ) self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() ) lowercase__ : Dict = ["HuggingFace is in ", "Paris is in France"] lowercase__ : Tuple = text_classifier(SCREAMING_SNAKE_CASE ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE ) , [{"label": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE )}, {"label": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE )}] , ) self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() ) self.assertTrue(outputs[1]["label"] in model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the legacy format lowercase__ : Optional[int] = text_classifier(SCREAMING_SNAKE_CASE , top_k=SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE ) , [[{"label": ANY(SCREAMING_SNAKE_CASE ), "score": 
ANY(SCREAMING_SNAKE_CASE )}] * N, [{"label": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE )}] * N] , ) lowercase__ : Union[str, Any] = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"} lowercase__ : Dict = text_classifier(SCREAMING_SNAKE_CASE ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE ) , {"label": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE )} , ) self.assertTrue(outputs["label"] in model.config.idalabel.values() ) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. lowercase__ : Optional[int] = [["HuggingFace is in ", "Paris is in France"]] with self.assertRaises(SCREAMING_SNAKE_CASE ): text_classifier(SCREAMING_SNAKE_CASE ) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility lowercase__ : Tuple = text_classifier([[["HuggingFace is in ", "Paris is in France"]]] ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE ) , [{"label": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE )}] , ) self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
81
from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class snake_case__: """simple docstring""" def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int=13 , SCREAMING_SNAKE_CASE : Union[str, Any]=30 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Optional[Any]=3 , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Optional[Any]=True , SCREAMING_SNAKE_CASE : List[str]=32 , SCREAMING_SNAKE_CASE : Optional[int]=2 , SCREAMING_SNAKE_CASE : str=4 , SCREAMING_SNAKE_CASE : List[Any]=37 , SCREAMING_SNAKE_CASE : Tuple="gelu" , SCREAMING_SNAKE_CASE : List[str]=0.1 , SCREAMING_SNAKE_CASE : List[Any]=0.1 , SCREAMING_SNAKE_CASE : int=10 , SCREAMING_SNAKE_CASE : List[str]=0.02 , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : str=0.6 , SCREAMING_SNAKE_CASE : Optional[Any]=None , ): lowercase__ : Union[str, Any] = parent lowercase__ : Optional[int] = batch_size lowercase__ : Union[str, Any] = image_size lowercase__ : List[Any] = patch_size lowercase__ : Any = num_channels lowercase__ : Optional[int] = is_training lowercase__ : Dict = use_labels lowercase__ : Any = hidden_size lowercase__ : List[Any] = num_hidden_layers lowercase__ : Union[str, Any] = num_attention_heads lowercase__ : Dict = intermediate_size 
lowercase__ : Optional[int] = hidden_act lowercase__ : Union[str, Any] = hidden_dropout_prob lowercase__ : Union[str, Any] = attention_probs_dropout_prob lowercase__ : List[Any] = type_sequence_label_size lowercase__ : Any = initializer_range lowercase__ : Optional[int] = mask_ratio lowercase__ : Union[str, Any] = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowercase__ : List[Any] = (image_size // patch_size) ** 2 lowercase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def snake_case ( self : int ): lowercase__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ : str = None if self.use_labels: lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : Optional[Any] = self.get_config() return config, pixel_values, labels def snake_case ( self : Tuple ): return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def snake_case ( self : str , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ): lowercase__ : Tuple = TFViTMAEModel(config=SCREAMING_SNAKE_CASE ) lowercase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] ): lowercase__ : Union[str, Any] = TFViTMAEForPreTraining(SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE ) # expected sequence length = num_patches lowercase__ : List[str] = (self.image_size // self.patch_size) ** 2 lowercase__ : List[Any] = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images lowercase__ : Dict = 1 lowercase__ : List[Any] = TFViTMAEForPreTraining(SCREAMING_SNAKE_CASE ) lowercase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def snake_case ( self : Optional[int] ): lowercase__ : int = self.prepare_config_and_inputs() ((lowercase__) , (lowercase__) , (lowercase__)) : Dict = config_and_inputs lowercase__ : str = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ): """simple docstring""" lowercase_ = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () lowercase_ = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {} lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False def snake_case ( self : List[str] ): lowercase__ : List[Any] = TFViTMAEModelTester(self ) lowercase__ : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 ) def 
snake_case ( self : Tuple ): self.config_tester.run_common_tests() @unittest.skip(reason="ViTMAE does not use inputs_embeds" ) def snake_case ( self : Union[str, Any] ): pass def snake_case ( self : Optional[int] ): lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : List[Any] = model_class(SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) lowercase__ : List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , tf.keras.layers.Layer ) ) def snake_case ( self : Optional[Any] ): lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE ) lowercase__ : Dict = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : Union[str, Any] = [*signature.parameters.keys()] lowercase__ : List[str] = ["pixel_values"] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE ) def snake_case ( self : Optional[Any] ): lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) def snake_case ( self : Optional[int] ): lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE ) def snake_case ( self : Optional[Any] ): # make the mask reproducible np.random.seed(2 ) lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : List[Any] = int((config.image_size // config.patch_size) ** 2 ) lowercase__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowercase__ : Optional[Any] = 
model_class(SCREAMING_SNAKE_CASE ) lowercase__ : int = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE ) lowercase__ : Any = copy.deepcopy(self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE ) lowercase__ : Union[str, Any] = outputs_dict[0].numpy() lowercase__ : Optional[int] = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 ) def snake_case ( self : str ): # make the mask reproducible np.random.seed(2 ) lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Optional[Any] = int((config.image_size // config.patch_size) ** 2 ) lowercase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(SCREAMING_SNAKE_CASE : Optional[int] ): lowercase__ : Tuple = {} for k, v in inputs_dict.items(): if tf.is_tensor(SCREAMING_SNAKE_CASE ): lowercase__ : Any = v.numpy() else: lowercase__ : List[Any] = np.array(SCREAMING_SNAKE_CASE ) return inputs_np_dict for model_class in self.all_model_classes: lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE ) lowercase__ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowercase__ : Any = prepare_numpy_arrays(SCREAMING_SNAKE_CASE ) lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE ) lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE ) self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] ): # make masks reproducible np.random.seed(2 ) lowercase__ : Optional[int] = int((tf_model.config.image_size // 
tf_model.config.patch_size) ** 2 ) lowercase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowercase__ : Union[str, Any] = tf.constant(SCREAMING_SNAKE_CASE ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowercase__ : Optional[int] = tf_noise super().check_pt_tf_models(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def snake_case ( self : str ): # make mask reproducible np.random.seed(2 ) lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : int = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(SCREAMING_SNAKE_CASE ) if module_member_name.endswith("MainLayer" ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. and module_member_name[: -len("MainLayer" )] == model_class.__name__[: -len("Model" )] for module_member in (getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ),) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(SCREAMING_SNAKE_CASE , "_keras_serializable" , SCREAMING_SNAKE_CASE ) } lowercase__ : List[str] = int((config.image_size // config.patch_size) ** 2 ) lowercase__ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowercase__ : str = tf.convert_to_tensor(SCREAMING_SNAKE_CASE ) inputs_dict.update({"noise": noise} ) for main_layer_class in tf_main_layer_classes: lowercase__ : Tuple = main_layer_class(SCREAMING_SNAKE_CASE ) lowercase__ : Optional[Any] = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } lowercase__ : Tuple = tf.keras.Model(SCREAMING_SNAKE_CASE , outputs=main_layer(SCREAMING_SNAKE_CASE ) ) lowercase__ : str = model(SCREAMING_SNAKE_CASE ) with 
tempfile.TemporaryDirectory() as tmpdirname: lowercase__ : str = os.path.join(SCREAMING_SNAKE_CASE , "keras_model.h5" ) model.save(SCREAMING_SNAKE_CASE ) lowercase__ : List[Any] = tf.keras.models.load_model( SCREAMING_SNAKE_CASE , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(SCREAMING_SNAKE_CASE , tf.keras.Model ) lowercase__ : Dict = model(SCREAMING_SNAKE_CASE ) self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def snake_case ( self : Optional[int] ): # make mask reproducible np.random.seed(2 ) lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 ) lowercase__ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE ) if model_class.__name__ == "TFViTMAEModel": lowercase__ : str = outputs.last_hidden_state.numpy() lowercase__ : Optional[Any] = 0 else: lowercase__ : Optional[Any] = outputs.logits.numpy() lowercase__ : Optional[int] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(SCREAMING_SNAKE_CASE , saved_model=SCREAMING_SNAKE_CASE ) lowercase__ : List[str] = model_class.from_pretrained(SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE ) if model_class.__name__ == "TFViTMAEModel": lowercase__ : Optional[int] = after_outputs["last_hidden_state"].numpy() lowercase__ : Optional[int] = 0 else: lowercase__ : str = after_outputs["logits"].numpy() lowercase__ : Tuple = 0 lowercase__ : Optional[Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(SCREAMING_SNAKE_CASE , 1E-5 ) def 
snake_case ( self : List[Any] ): # make mask reproducible np.random.seed(2 ) lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : List[str] = int((config.image_size // config.patch_size) ** 2 ) lowercase__ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowercase__ : Tuple = model_class(SCREAMING_SNAKE_CASE ) lowercase__ : Dict = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowercase__ : int = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE ) lowercase__ : str = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(SCREAMING_SNAKE_CASE ) lowercase__ : int = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config lowercase__ : Any = model_class.from_config(model.config ) lowercase__ : Tuple = new_model(SCREAMING_SNAKE_CASE ) # Build model new_model.set_weights(model.get_weights() ) lowercase__ : Union[str, Any] = new_model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE ) self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @unittest.skip( reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." ) def snake_case ( self : List[Any] ): pass @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. 
See test_save_load" ) def snake_case ( self : str ): pass @slow def snake_case ( self : List[Any] ): lowercase__ : List[Any] = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224" ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( ): """simple docstring""" lowercase__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class snake_case__(unittest.TestCase ): """simple docstring""" @cached_property def snake_case ( self : Any ): return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None @slow def snake_case ( self : Union[str, Any] ): # make random mask reproducible across the PT and TF model np.random.seed(2 ) lowercase__ : Optional[Any] = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ) lowercase__ : Optional[Any] = self.default_image_processor lowercase__ : Union[str, Any] = prepare_img() lowercase__ : Tuple = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="tf" ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowercase__ : Union[str, Any] = ViTMAEConfig() lowercase__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) lowercase__ : List[str] = np.random.uniform(size=(1, num_patches) ) # forward pass lowercase__ : Optional[Any] = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE ) # verify the logits lowercase__ : List[str] = tf.convert_to_tensor([1, 196, 768] ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE ) lowercase__ : List[str] = tf.convert_to_tensor( [[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
81
1
def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" stooge(lowerCamelCase__ , 0 , len(lowerCamelCase__ ) - 1 ) return arr def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" if i >= h: return # If first element is smaller than the last then swap them if arr[i] > arr[h]: lowercase__ , lowercase__ : str = arr[h], arr[i] # If there are more than 2 elements in the array if h - i + 1 > 2: lowercase__ : Dict = (int)((h - i + 1) / 3 ) # Recursively sort first 2/3 elements stooge(lowerCamelCase__ , lowerCamelCase__ , (h - t) ) # Recursively sort last 2/3 elements stooge(lowerCamelCase__ , i + t , (lowerCamelCase__) ) # Recursively sort first 2/3 elements stooge(lowerCamelCase__ , lowerCamelCase__ , (h - t) ) if __name__ == "__main__": lowerCAmelCase__ = input('''Enter numbers separated by a comma:\n''').strip() lowerCAmelCase__ = [int(item) for item in user_input.split(''',''')] print(stooge_sort(unsorted))
81
from dataclasses import asdict, dataclass
from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


# NOTE(review): the original bound the logger to a throwaway name while the
# class bodies called `logger.info`/`logger.warning` (undefined), referenced
# `EsmFoldConfig`/`TrunkConfig`/`StructureModuleConfig`/`get_default_vocab_list`
# that were never bound under those names, declared dataclass "fields" without
# annotations (so @dataclass/asdict saw none), and gave every __init__ parameter
# the same name (a SyntaxError). Identifiers restored to match the call sites.
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class EsmConfig(PretrainedConfig):
    """Configuration for ESM models (optionally carrying an ESMFold config)."""

    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        # pad/mask token ids are handled by the base PretrainedConfig.
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model

        if is_folding_model:
            # Normalize esmfold_config to an EsmFoldConfig instance.
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None

        # use_esm_attn_map is accepted by the config but not implemented here.
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested EsmFoldConfig."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output


@dataclass
class EsmFoldConfig:
    """Folding-head configuration nested inside EsmConfig."""

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        # Normalize `trunk` to a TrunkConfig instance (accepts None or a dict).
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output


@dataclass
class TrunkConfig:
    """Trunk (folding-block stack) configuration nested inside EsmFoldConfig."""

    num_blocks: int = 48
    sequence_state_dim: int = 1_024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        # Normalize `structure_module` to a StructureModuleConfig instance.
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # NOTE(review): these two modulus checks compare a dimension against
        # itself (x % x == 0 always) and so can never fire; kept as-is to
        # preserve behavior — the head-width checks below enforce divisibility.
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
                f" {self.sequence_state_dim} and {self.sequence_state_dim}."
            )
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_state_dim}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output


@dataclass
class StructureModuleConfig:
    """Structure-module (IPA head) configuration nested inside TrunkConfig."""

    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
    """Return the default ESM-2 token vocabulary as a tuple of strings."""
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )
81
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# NOTE(review): the original rebound one throwaway name for the import table,
# the vision list AND the torch list (each rebind clobbered the previous),
# then passed an undefined `_import_structure` to _LazyModule and imported
# `sys` without installing the lazy module. Restored the standard pattern.
_import_structure = {
    "configuration_bridgetower": [
        "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BridgeTowerConfig",
        "BridgeTowerTextConfig",
        "BridgeTowerVisionConfig",
    ],
    "processing_bridgetower": ["BridgeTowerProcessor"],
}

# Image processor requires vision extras.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]

# Modeling classes require torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bridgetower"] = [
        "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BridgeTowerForContrastiveLearning",
        "BridgeTowerForImageAndTextRetrieval",
        "BridgeTowerForMaskedLM",
        "BridgeTowerModel",
        "BridgeTowerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bridgetower import (
        BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BridgeTowerConfig,
        BridgeTowerTextConfig,
        BridgeTowerVisionConfig,
    )
    from .processing_bridgetower import BridgeTowerProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_bridgetower import BridgeTowerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bridgetower import (
            BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
            BridgeTowerForContrastiveLearning,
            BridgeTowerForImageAndTextRetrieval,
            BridgeTowerForMaskedLM,
            BridgeTowerModel,
            BridgeTowerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
81
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


# NOTE(review): the original bound the logger and the archive map to the same
# throwaway name (the second rebind clobbered the logger the __init__ body
# calls), gave every __init__ parameter the same name (a SyntaxError), and
# read `backbone_model_type` without ever binding it. Parameter names restored
# from the one-to-one `self.x = param` assignment list in the body.
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}


class DeformableDetrConfig(PretrainedConfig):
    """Configuration for Deformable DETR models."""

    model_type = "deformable_detr"
    # Common HF attribute names mapped onto this config's own fields.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1_024,
        encoder_layers=6,
        encoder_ffn_dim=1_024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1_024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            # Normalize backbone_config to a config object (default: ResNet stage4).
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Deformable attributes
        # NOTE(review): `return_intermediate` is accepted but never stored in
        # the original body — preserved as-is; confirm whether it is dead.
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        # Mirrors attribute_map: num_attention_heads -> encoder_attention_heads.
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        # Mirrors attribute_map: hidden_size -> d_model.
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
81
1
import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class snake_case__: """simple docstring""" def __init__( self : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, Any]=13 , SCREAMING_SNAKE_CASE : Tuple=7 , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Optional[Any]=True , SCREAMING_SNAKE_CASE : Optional[int]=True , SCREAMING_SNAKE_CASE : List[Any]=True , SCREAMING_SNAKE_CASE : int=99 , SCREAMING_SNAKE_CASE : str=64 , SCREAMING_SNAKE_CASE : Dict=32 , SCREAMING_SNAKE_CASE : str=5 , SCREAMING_SNAKE_CASE : List[str]=4 , SCREAMING_SNAKE_CASE : int=37 , SCREAMING_SNAKE_CASE : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE : Optional[int]=0.1 , SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE : Optional[Any]=512 , SCREAMING_SNAKE_CASE : Any=16 , SCREAMING_SNAKE_CASE : int=2 , SCREAMING_SNAKE_CASE : str=0.02 , SCREAMING_SNAKE_CASE : Any=3 , SCREAMING_SNAKE_CASE : Optional[Any]=4 , SCREAMING_SNAKE_CASE : List[str]=None , ): lowercase__ : List[Any] = parent lowercase__ : Optional[int] = batch_size lowercase__ : int = seq_length lowercase__ : int = is_training lowercase__ : int = use_input_mask lowercase__ : List[Any] = use_token_type_ids lowercase__ : Tuple 
= use_labels lowercase__ : List[Any] = vocab_size lowercase__ : Tuple = hidden_size lowercase__ : Optional[Any] = embedding_size lowercase__ : List[Any] = num_hidden_layers lowercase__ : Optional[Any] = num_attention_heads lowercase__ : Dict = intermediate_size lowercase__ : str = hidden_act lowercase__ : List[Any] = hidden_dropout_prob lowercase__ : List[str] = attention_probs_dropout_prob lowercase__ : List[Any] = max_position_embeddings lowercase__ : List[str] = type_vocab_size lowercase__ : Optional[Any] = type_sequence_label_size lowercase__ : List[Any] = initializer_range lowercase__ : Tuple = num_labels lowercase__ : Dict = num_choices lowercase__ : int = scope def snake_case ( self : Tuple ): lowercase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : int = None if self.use_input_mask: lowercase__ : Any = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : Optional[Any] = None if self.use_token_type_ids: lowercase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase__ : str = None lowercase__ : int = None lowercase__ : List[str] = None if self.use_labels: lowercase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase__ : Tuple = ids_tensor([self.batch_size] , self.num_choices ) lowercase__ : Optional[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case ( self : List[str] ): return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) def snake_case ( self : Any , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple ): lowercase__ : Tuple = MegatronBertModel(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() lowercase__ : str = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE ) lowercase__ : int = model(SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE ) lowercase__ : str = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : str ): lowercase__ : Optional[int] = MegatronBertForMaskedLM(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() lowercase__ : Dict = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Union[str, 
Any] ): lowercase__ : str = MegatronBertForCausalLM(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() lowercase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[Any] ): lowercase__ : Union[str, Any] = MegatronBertForNextSentencePrediction(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() lowercase__ : Optional[Any] = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[int] ): lowercase__ : str = MegatronBertForPreTraining(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() lowercase__ : Tuple = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , next_sentence_label=SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def snake_case ( self : str , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[str] , 
SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[Any] ): lowercase__ : List[Any] = MegatronBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() lowercase__ : List[Any] = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , start_positions=SCREAMING_SNAKE_CASE , end_positions=SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case ( self : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str ): lowercase__ : int = self.num_labels lowercase__ : Union[str, Any] = MegatronBertForSequenceClassification(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() lowercase__ : str = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] ): lowercase__ : Optional[Any] = self.num_labels lowercase__ : List[Any] = MegatronBertForTokenClassification(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , 
(self.batch_size, self.seq_length, self.num_labels) ) def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Any ): lowercase__ : List[Any] = self.num_choices lowercase__ : int = MegatronBertForMultipleChoice(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() lowercase__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : Optional[int] = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def snake_case ( self : Optional[int] ): lowercase__ : Dict = self.prepare_config_and_inputs() ( ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ) : List[str] = config_and_inputs lowercase__ : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ): """simple docstring""" lowercase_ = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) lowercase_ = ( { """feature-extraction""": MegatronBertModel, """fill-mask""": MegatronBertForMaskedLM, 
"""question-answering""": MegatronBertForQuestionAnswering, """text-classification""": MegatronBertForSequenceClassification, """text-generation""": MegatronBertForCausalLM, """token-classification""": MegatronBertForTokenClassification, """zero-shot""": MegatronBertForSequenceClassification, } if is_torch_available() else {} ) lowercase_ = True # test_resize_embeddings = False lowercase_ = False def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str=False ): lowercase__ : str = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE ) if return_labels: if model_class in get_values(SCREAMING_SNAKE_CASE ): lowercase__ : Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE ) lowercase__ : Dict = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE ) return inputs_dict def snake_case ( self : Union[str, Any] ): lowercase__ : int = MegatronBertModelTester(self ) lowercase__ : int = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 ) def snake_case ( self : int ): self.config_tester.run_common_tests() def snake_case ( self : Any ): lowercase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*SCREAMING_SNAKE_CASE ) def snake_case ( self : str ): lowercase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*SCREAMING_SNAKE_CASE ) def snake_case ( self : Optional[int] ): lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*SCREAMING_SNAKE_CASE ) def snake_case ( self : Dict ): lowercase__ : Dict = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*SCREAMING_SNAKE_CASE ) def snake_case ( self : Any ): lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*SCREAMING_SNAKE_CASE ) def snake_case ( self : Tuple ): lowercase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*SCREAMING_SNAKE_CASE ) def snake_case ( self : Tuple ): lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*SCREAMING_SNAKE_CASE ) def snake_case ( self : List[Any] ): lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" return torch.tensor( lowerCamelCase__ , dtype=torch.long , device=lowerCamelCase__ , ) lowerCAmelCase__ = 1e-4 @require_torch @require_sentencepiece @require_tokenizers class snake_case__(unittest.TestCase ): """simple docstring""" @slow @unittest.skip("Model is not available." 
) def snake_case ( self : Optional[Any] ): lowercase__ : Optional[int] = "nvidia/megatron-bert-uncased-345m" if "MYDIR" in os.environ: lowercase__ : Optional[Any] = os.path.join(os.environ["MYDIR"] , SCREAMING_SNAKE_CASE ) lowercase__ : Tuple = MegatronBertModel.from_pretrained(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.half() lowercase__ : Tuple = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]] ) with torch.no_grad(): lowercase__ : int = model(SCREAMING_SNAKE_CASE )[0] lowercase__ : Union[str, Any] = torch.Size((1, 9, 1_024) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE ) lowercase__ : Tuple = [-0.6_040, -0.2_517, -0.1_025, 0.3_420, -0.6_758, -0.0_017, -0.1_089, -0.1_990, 0.5_728] for ii in range(3 ): for jj in range(3 ): lowercase__ : Tuple = output[0, ii, jj] lowercase__ : Optional[Any] = expected[3 * ii + jj] lowercase__ : List[Any] = "ii={} jj={} a={} b={}".format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.assertTrue(math.isclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , rel_tol=SCREAMING_SNAKE_CASE , abs_tol=SCREAMING_SNAKE_CASE ) , msg=SCREAMING_SNAKE_CASE )
81
from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging lowerCAmelCase__ = logging.get_logger(__name__) class snake_case__(_UpperCamelCase ): """simple docstring""" lowercase_ = ["""pixel_values"""] def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 255 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : int = 8 , **SCREAMING_SNAKE_CASE : Dict , ): super().__init__(**SCREAMING_SNAKE_CASE ) lowercase__ : str = do_rescale lowercase__ : Optional[Any] = rescale_factor lowercase__ : Any = do_pad lowercase__ : Optional[Any] = pad_size def snake_case ( self : str , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : Optional[int] ): return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None ): lowercase__ , lowercase__ : str = get_image_size(SCREAMING_SNAKE_CASE ) lowercase__ : Union[str, Any] = (old_height // size + 1) * size - old_height lowercase__ : List[Any] = (old_width // size + 1) * size - old_width return pad(SCREAMING_SNAKE_CASE , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=SCREAMING_SNAKE_CASE ) def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : ImageInput , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[float] = None , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE 
: Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE : Union[str, ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE : Dict , ): lowercase__ : int = do_rescale if do_rescale is not None else self.do_rescale lowercase__ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase__ : str = do_pad if do_pad is not None else self.do_pad lowercase__ : Optional[int] = pad_size if pad_size is not None else self.pad_size lowercase__ : Tuple = make_list_of_images(SCREAMING_SNAKE_CASE ) if not valid_images(SCREAMING_SNAKE_CASE ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) # All transformations expect numpy arrays. lowercase__ : Any = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images] if do_rescale: lowercase__ : Any = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images] if do_pad: lowercase__ : Tuple = [self.pad(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE ) for image in images] lowercase__ : Union[str, Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images] lowercase__ : Optional[Any] = {"pixel_values": images} return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
81
1
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''', # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class snake_case__(_UpperCamelCase ): """simple docstring""" lowercase_ = """deformable_detr""" lowercase_ = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : int=300 , SCREAMING_SNAKE_CASE : Any=1_024 , SCREAMING_SNAKE_CASE : Dict=6 , SCREAMING_SNAKE_CASE : Optional[int]=1_024 , SCREAMING_SNAKE_CASE : Optional[int]=8 , SCREAMING_SNAKE_CASE : str=6 , SCREAMING_SNAKE_CASE : Optional[int]=1_024 , SCREAMING_SNAKE_CASE : Optional[Any]=8 , SCREAMING_SNAKE_CASE : List[Any]=0.0 , SCREAMING_SNAKE_CASE : Tuple=True , SCREAMING_SNAKE_CASE : List[str]="relu" , SCREAMING_SNAKE_CASE : List[Any]=256 , SCREAMING_SNAKE_CASE : int=0.1 , SCREAMING_SNAKE_CASE : Optional[int]=0.0 , SCREAMING_SNAKE_CASE : List[str]=0.0 , SCREAMING_SNAKE_CASE : Tuple=0.02 , SCREAMING_SNAKE_CASE : Any=1.0 , SCREAMING_SNAKE_CASE : int=True , SCREAMING_SNAKE_CASE : str=False , SCREAMING_SNAKE_CASE : Optional[int]="sine" , SCREAMING_SNAKE_CASE : List[str]="resnet50" , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : Any=False , SCREAMING_SNAKE_CASE : Optional[Any]=4 , SCREAMING_SNAKE_CASE : List[str]=4 , SCREAMING_SNAKE_CASE : Tuple=4 , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Tuple=300 , SCREAMING_SNAKE_CASE : Optional[Any]=False , SCREAMING_SNAKE_CASE : Tuple=1 , SCREAMING_SNAKE_CASE : Any=5 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Optional[Any]=1 , 
SCREAMING_SNAKE_CASE : str=1 , SCREAMING_SNAKE_CASE : List[str]=5 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.25 , SCREAMING_SNAKE_CASE : str=False , **SCREAMING_SNAKE_CASE : Union[str, Any] , ): if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) lowercase__ : Optional[int] = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): lowercase__ : List[Any] = backbone_config.get("model_type" ) lowercase__ : Any = CONFIG_MAPPING[backbone_model_type] lowercase__ : str = config_class.from_dict(SCREAMING_SNAKE_CASE ) lowercase__ : int = use_timm_backbone lowercase__ : Optional[Any] = backbone_config lowercase__ : Union[str, Any] = num_channels lowercase__ : List[Any] = num_queries lowercase__ : List[Any] = max_position_embeddings lowercase__ : Union[str, Any] = d_model lowercase__ : Union[str, Any] = encoder_ffn_dim lowercase__ : Optional[Any] = encoder_layers lowercase__ : Optional[Any] = encoder_attention_heads lowercase__ : Optional[Any] = decoder_ffn_dim lowercase__ : List[Any] = decoder_layers lowercase__ : Optional[int] = decoder_attention_heads lowercase__ : str = dropout lowercase__ : Union[str, Any] = attention_dropout lowercase__ : List[str] = activation_dropout lowercase__ : Optional[Any] = activation_function lowercase__ : Optional[Any] = init_std lowercase__ : str = init_xavier_std lowercase__ : Any = encoder_layerdrop lowercase__ : int = auxiliary_loss lowercase__ : Dict = position_embedding_type lowercase__ : int = backbone lowercase__ : Optional[Any] = use_pretrained_backbone lowercase__ : List[Any] = dilation # deformable attributes lowercase__ : Dict = num_feature_levels 
lowercase__ : Optional[int] = encoder_n_points lowercase__ : Any = decoder_n_points lowercase__ : int = two_stage lowercase__ : int = two_stage_num_proposals lowercase__ : Union[str, Any] = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError("If two_stage is True, with_box_refine must be True." ) # Hungarian matcher lowercase__ : List[Any] = class_cost lowercase__ : Optional[int] = bbox_cost lowercase__ : Any = giou_cost # Loss coefficients lowercase__ : List[str] = mask_loss_coefficient lowercase__ : int = dice_loss_coefficient lowercase__ : Any = bbox_loss_coefficient lowercase__ : Any = giou_loss_coefficient lowercase__ : Optional[int] = eos_coefficient lowercase__ : int = focal_alpha lowercase__ : Dict = disable_custom_kernels super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @property def snake_case ( self : List[Any] ): return self.encoder_attention_heads @property def snake_case ( self : Union[str, Any] ): return self.d_model def snake_case ( self : str ): lowercase__ : List[str] = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: lowercase__ : int = self.backbone_config.to_dict() lowercase__ : Union[str, Any] = self.__class__.model_type return output
81
import argparse import json from tqdm import tqdm def __lowerCamelCase ( ): """simple docstring""" lowercase__ : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--src_path" , type=lowerCamelCase__ , default="biencoder-nq-dev.json" , help="Path to raw DPR training data" , ) parser.add_argument( "--evaluation_set" , type=lowerCamelCase__ , help="where to store parsed evaluation_set file" , ) parser.add_argument( "--gold_data_path" , type=lowerCamelCase__ , help="where to store parsed gold_data_path file" , ) lowercase__ : Dict = parser.parse_args() with open(args.src_path , "r" ) as src_file, open(args.evaluation_set , "w" ) as eval_file, open( args.gold_data_path , "w" ) as gold_file: lowercase__ : List[str] = json.load(lowerCamelCase__ ) for dpr_record in tqdm(lowerCamelCase__ ): lowercase__ : Any = dpr_record["question"] lowercase__ : str = [context["title"] for context in dpr_record["positive_ctxs"]] eval_file.write(question + "\n" ) gold_file.write("\t".join(lowerCamelCase__ ) + "\n" ) if __name__ == "__main__": main()
81
1
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase__ = { '''vocab_file''': { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt''' ), '''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''', '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json''' ), '''distilbert-base-german-cased''': ( '''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json''' ), '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json''' ), }, } 
lowerCAmelCase__ = { '''distilbert-base-uncased''': 5_1_2, '''distilbert-base-uncased-distilled-squad''': 5_1_2, '''distilbert-base-cased''': 5_1_2, '''distilbert-base-cased-distilled-squad''': 5_1_2, '''distilbert-base-german-cased''': 5_1_2, '''distilbert-base-multilingual-cased''': 5_1_2, } lowerCAmelCase__ = { '''distilbert-base-uncased''': {'''do_lower_case''': True}, '''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True}, '''distilbert-base-cased''': {'''do_lower_case''': False}, '''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False}, '''distilbert-base-german-cased''': {'''do_lower_case''': False}, '''distilbert-base-multilingual-cased''': {'''do_lower_case''': False}, } class snake_case__(_UpperCamelCase ): """simple docstring""" lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = PRETRAINED_INIT_CONFIGURATION lowercase_ = ["""input_ids""", """attention_mask"""] lowercase_ = DistilBertTokenizer def __init__( self : List[str] , SCREAMING_SNAKE_CASE : List[str]=None , SCREAMING_SNAKE_CASE : str=None , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : List[Any]="[UNK]" , SCREAMING_SNAKE_CASE : Optional[int]="[SEP]" , SCREAMING_SNAKE_CASE : Optional[int]="[PAD]" , SCREAMING_SNAKE_CASE : List[Any]="[CLS]" , SCREAMING_SNAKE_CASE : Optional[Any]="[MASK]" , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : Dict=None , **SCREAMING_SNAKE_CASE : Tuple , ): super().__init__( SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , do_lower_case=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , tokenize_chinese_chars=SCREAMING_SNAKE_CASE , strip_accents=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) lowercase__ : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( 
normalizer_state.get("lowercase" , SCREAMING_SNAKE_CASE ) != do_lower_case or normalizer_state.get("strip_accents" , SCREAMING_SNAKE_CASE ) != strip_accents or normalizer_state.get("handle_chinese_chars" , SCREAMING_SNAKE_CASE ) != tokenize_chinese_chars ): lowercase__ : List[Any] = getattr(SCREAMING_SNAKE_CASE , normalizer_state.pop("type" ) ) lowercase__ : Optional[int] = do_lower_case lowercase__ : Any = strip_accents lowercase__ : int = tokenize_chinese_chars lowercase__ : Dict = normalizer_class(**SCREAMING_SNAKE_CASE ) lowercase__ : List[Any] = do_lower_case def snake_case ( self : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str]=None ): lowercase__ : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None ): lowercase__ : Optional[Any] = [self.sep_token_id] lowercase__ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[str] = None ): lowercase__ : List[str] = self._tokenizer.model.save(SCREAMING_SNAKE_CASE , name=SCREAMING_SNAKE_CASE ) return tuple(SCREAMING_SNAKE_CASE )
81
import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer lowerCAmelCase__ = logging.getLogger(__name__) def __lowerCamelCase ( ): """simple docstring""" lowercase__ : str = argparse.ArgumentParser( description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." ) parser.add_argument( "--dataset_name" , type=lowerCamelCase__ , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , ) parser.add_argument( "--dataset_config" , type=lowerCamelCase__ , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." ) parser.add_argument( "--tokenizer_name_or_path" , type=lowerCamelCase__ , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , ) parser.add_argument( "--shard_size" , type=lowerCamelCase__ , default=1_000 , help="Number of entries to go in a single shard." , ) parser.add_argument("--split" , type=lowerCamelCase__ , default="train" , choices=["train", "test", "validation"] ) parser.add_argument( "--limit" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="Limit the number of shards (used for debugging)." , ) parser.add_argument( "--max_length" , type=lowerCamelCase__ , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum" " sequence length that is a multiple of 8." , ) parser.add_argument( "--output_dir" , default="tf-tpu" , type=lowerCamelCase__ , help="Output directory where the TFRecord shards will be saved. If the" " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord" " shards will be directly saved to a Google Cloud Storage bucket." 
, ) lowercase__ : Optional[int] = parser.parse_args() return args def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" def fn(lowerCamelCase__ ): return tokenizer(examples["text"] ) return fn def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" lowercase__ : str = [] for i in range(len(tokenized_data["input_ids"] ) ): lowercase__ : str = { "input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ), "attention_mask": tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ), } lowercase__ : Any = tf.train.Features(feature=lowerCamelCase__ ) lowercase__ : Any = tf.train.Example(features=lowerCamelCase__ ) lowercase__ : str = example.SerializeToString() records.append(lowerCamelCase__ ) return records def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" lowercase__ : Tuple = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: lowercase__ : List[str] = min(len(lowerCamelCase__ ) , args.limit ) lowercase__ : Union[str, Any] = dataset.select(range(lowerCamelCase__ ) ) print(F"""Limiting the dataset to {args.limit} entries.""" ) lowercase__ : Any = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) lowercase__ : Any = os.path.join(args.output_dir , args.split ) if not os.path.exists(lowerCamelCase__ ): os.makedirs(lowerCamelCase__ ) else: lowercase__ : str = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. 
lowercase__ : str = tokenize_function(lowerCamelCase__ ) lowercase__ : Optional[int] = dataset.map(lowerCamelCase__ , batched=lowerCamelCase__ , num_proc=4 , remove_columns=["text"] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(lowerCamelCase__ ): # Concatenate all texts. lowercase__ : Optional[Any] = {k: sum(examples[k] , [] ) for k in examples.keys()} lowercase__ : int = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 lowercase__ : List[str] = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. 
lowercase__ : Optional[int] = { k: [t[i : i + args.max_length] for i in range(0 , lowerCamelCase__ , args.max_length )] for k, t in concatenated_examples.items() } return result lowercase__ : Union[str, Any] = dataset_tokenized.map(lowerCamelCase__ , batched=lowerCamelCase__ , batch_size=1_000 , num_proc=4 ) lowercase__ : str = 0 lowercase__ : str = 0 for shard in range(0 , len(lowerCamelCase__ ) , args.shard_size ): lowercase__ : List[str] = grouped_dataset[shard : shard + args.shard_size] lowercase__ : str = len(dataset_snapshot["input_ids"] ) lowercase__ : int = os.path.join(lowerCamelCase__ , F"""dataset-{shard_count}-{records_containing}.tfrecord""" ) lowercase__ : Optional[int] = get_serialized_examples(lowerCamelCase__ ) with tf.io.TFRecordWriter(lowerCamelCase__ ) as out_file: for i in range(len(lowerCamelCase__ ) ): lowercase__ : Optional[int] = serialized_examples[i] out_file.write(lowerCamelCase__ ) print("Wrote file {} containing {} records".format(lowerCamelCase__ , lowerCamelCase__ ) ) shard_count += 1 total_records += records_containing with open(F"""split-{args.split}-records-count.txt""" , "w" ) as f: print(F"""Total {args.split} records: {total_records}""" , file=lowerCamelCase__ ) if __name__ == "__main__": lowerCAmelCase__ = parse_args() main(args)
81
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = { '''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''], '''tokenization_roformer''': ['''RoFormerTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''RoFormerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RoFormerForCausalLM''', '''RoFormerForMaskedLM''', '''RoFormerForMultipleChoice''', '''RoFormerForQuestionAnswering''', '''RoFormerForSequenceClassification''', '''RoFormerForTokenClassification''', '''RoFormerLayer''', '''RoFormerModel''', '''RoFormerPreTrainedModel''', '''load_tf_weights_in_roformer''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFRoFormerForCausalLM''', '''TFRoFormerForMaskedLM''', '''TFRoFormerForMultipleChoice''', '''TFRoFormerForQuestionAnswering''', '''TFRoFormerForSequenceClassification''', '''TFRoFormerForTokenClassification''', '''TFRoFormerLayer''', '''TFRoFormerModel''', '''TFRoFormerPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FlaxRoFormerForMaskedLM''', '''FlaxRoFormerForMultipleChoice''', '''FlaxRoFormerForQuestionAnswering''', '''FlaxRoFormerForSequenceClassification''', '''FlaxRoFormerForTokenClassification''', '''FlaxRoFormerModel''', 
'''FlaxRoFormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, FlaxRoFormerPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
81
import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case__: """simple docstring""" def __init__( self : Any , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple=13 , SCREAMING_SNAKE_CASE : List[str]=32 , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : Any=4 , SCREAMING_SNAKE_CASE : Optional[Any]=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE : int=[2, 2, 3, 2] , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : str=37 , SCREAMING_SNAKE_CASE : Tuple="gelu" , SCREAMING_SNAKE_CASE : Optional[int]=10 , SCREAMING_SNAKE_CASE : Optional[int]=0.02 , SCREAMING_SNAKE_CASE : Union[str, Any]=["stage2", "stage3", "stage4"] , SCREAMING_SNAKE_CASE : Optional[int]=[2, 3, 4] , SCREAMING_SNAKE_CASE : str=None , ): lowercase__ : Union[str, Any] = parent lowercase__ : Optional[int] = batch_size lowercase__ : Optional[Any] = image_size lowercase__ : Tuple = num_channels lowercase__ : Tuple = num_stages lowercase__ : List[Any] = hidden_sizes lowercase__ : Any = depths lowercase__ : List[str] = is_training lowercase__ : int = use_labels lowercase__ : Union[str, Any] = 
intermediate_size lowercase__ : List[Any] = hidden_act lowercase__ : Tuple = num_labels lowercase__ : Optional[Any] = initializer_range lowercase__ : Optional[Any] = out_features lowercase__ : Union[str, Any] = out_indices lowercase__ : Tuple = scope def snake_case ( self : Dict ): lowercase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ : Dict = None if self.use_labels: lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_labels ) lowercase__ : Tuple = self.get_config() return config, pixel_values, labels def snake_case ( self : Tuple ): return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] ): lowercase__ : Dict = ConvNextVaModel(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ): lowercase__ : Any = ConvNextVaForImageClassification(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() lowercase__ : str = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case ( self : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : 
Dict ): lowercase__ : Any = ConvNextVaBackbone(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None lowercase__ : str = None lowercase__ : List[Any] = ConvNextVaBackbone(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def snake_case ( self : Dict ): lowercase__ : str = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Optional[int] = config_and_inputs lowercase__ : List[Any] = {"pixel_values": pixel_values} return config, inputs_dict def snake_case ( self : Optional[Any] ): lowercase__ : Optional[Any] = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Dict = config_and_inputs lowercase__ : Optional[Any] = {"pixel_values": pixel_values, "labels": labels} return config, inputs_dict @require_torch class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ): """simple docstring""" lowercase_ = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) lowercase_ = ( {"""feature-extraction""": ConvNextVaModel, """image-classification""": 
ConvNextVaForImageClassification} if is_torch_available() else {} ) lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False def snake_case ( self : List[Any] ): lowercase__ : List[str] = ConvNextVaModelTester(self ) lowercase__ : Optional[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 ) def snake_case ( self : Optional[int] ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def snake_case ( self : List[str] ): return @unittest.skip(reason="ConvNextV2 does not use inputs_embeds" ) def snake_case ( self : Dict ): pass @unittest.skip(reason="ConvNextV2 does not support input and output embeddings" ) def snake_case ( self : Union[str, Any] ): pass @unittest.skip(reason="ConvNextV2 does not use feedforward chunking" ) def snake_case ( self : Union[str, Any] ): pass def snake_case ( self : Optional[int] ): if not self.model_tester.is_training: return for model_class in self.all_model_classes: lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels() lowercase__ : List[str] = True if model_class.__name__ in [ *get_values(SCREAMING_SNAKE_CASE ), *get_values(SCREAMING_SNAKE_CASE ), ]: continue lowercase__ : List[str] = model_class(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.train() lowercase__ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE ) lowercase__ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE ).loss loss.backward() def snake_case ( self : Optional[Any] ): if not 
self.model_tester.is_training: return for model_class in self.all_model_classes: lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_with_labels() lowercase__ : Optional[Any] = False lowercase__ : Dict = True if ( model_class.__name__ in [*get_values(SCREAMING_SNAKE_CASE ), *get_values(SCREAMING_SNAKE_CASE )] or not model_class.supports_gradient_checkpointing ): continue lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.gradient_checkpointing_enable() model.train() lowercase__ : str = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE ) lowercase__ : str = model(**SCREAMING_SNAKE_CASE ).loss loss.backward() def snake_case ( self : int ): lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE ) lowercase__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : str = [*signature.parameters.keys()] lowercase__ : Optional[int] = ["pixel_values"] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE ) def snake_case ( self : Dict ): lowercase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) def snake_case ( self : str ): def check_hidden_states_output(SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : str ): lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): lowercase__ : Tuple = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) lowercase__ : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase__ : Dict = self.model_tester.num_stages 
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Union[str, Any] = True check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : Optional[Any] = True check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def snake_case ( self : Any ): lowercase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE ) @slow def snake_case ( self : List[str] ): for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : List[str] = ConvNextVaModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( ): """simple docstring""" lowercase__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class snake_case__(unittest.TestCase ): """simple docstring""" @cached_property def snake_case ( self : List[Any] ): return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None @slow def snake_case ( self : Optional[int] ): lowercase__ : Union[str, Any] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(SCREAMING_SNAKE_CASE ) lowercase__ : Dict = self.default_image_processor lowercase__ : int = prepare_img() lowercase__ : Optional[Any] = preprocessor(images=SCREAMING_SNAKE_CASE , return_tensors="pt" 
).to(SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE ) # verify the logits lowercase__ : Optional[int] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE ) lowercase__ : Optional[Any] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
81
1
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetrImageProcessor class snake_case__(unittest.TestCase ): """simple docstring""" def __init__( self : List[str] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Union[str, Any]=7 , SCREAMING_SNAKE_CASE : List[Any]=3 , SCREAMING_SNAKE_CASE : Any=30 , SCREAMING_SNAKE_CASE : Tuple=400 , SCREAMING_SNAKE_CASE : str=True , SCREAMING_SNAKE_CASE : List[str]=None , SCREAMING_SNAKE_CASE : Any=True , SCREAMING_SNAKE_CASE : List[str]=1 / 255 , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : str=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE : int=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE : Optional[Any]=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p lowercase__ : List[Any] = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333} lowercase__ : Optional[int] = parent lowercase__ : Optional[int] = batch_size lowercase__ : List[Any] = num_channels lowercase__ : List[str] = min_resolution lowercase__ : Tuple = max_resolution lowercase__ : Dict = do_resize lowercase__ : str = size lowercase__ : Optional[Any] = do_rescale lowercase__ : Any = rescale_factor lowercase__ : Optional[int] = do_normalize lowercase__ : Dict = image_mean lowercase__ : List[Any] = image_std lowercase__ : str = do_pad def snake_case ( self : Optional[Any] ): return { "do_resize": self.do_resize, "size": self.size, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_pad": self.do_pad, } def snake_case 
( self : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any]=False ): if not batched: lowercase__ : List[str] = image_inputs[0] if isinstance(SCREAMING_SNAKE_CASE , Image.Image ): lowercase__ , lowercase__ : List[Any] = image.size else: lowercase__ , lowercase__ : List[Any] = image.shape[1], image.shape[2] if w < h: lowercase__ : List[Any] = int(self.size["shortest_edge"] * h / w ) lowercase__ : Dict = self.size["shortest_edge"] elif w > h: lowercase__ : List[str] = self.size["shortest_edge"] lowercase__ : Tuple = int(self.size["shortest_edge"] * w / h ) else: lowercase__ : List[str] = self.size["shortest_edge"] lowercase__ : Tuple = self.size["shortest_edge"] else: lowercase__ : Optional[int] = [] for image in image_inputs: lowercase__ , lowercase__ : Dict = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowercase__ : Optional[int] = max(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : item[0] )[0] lowercase__ : Optional[int] = max(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class snake_case__(_UpperCamelCase , unittest.TestCase ): """simple docstring""" lowercase_ = DetrImageProcessor if is_vision_available() else None def snake_case ( self : Dict ): lowercase__ : str = DetrImageProcessingTester(self ) @property def snake_case ( self : Dict ): return self.image_processor_tester.prepare_image_processor_dict() def snake_case ( self : Optional[int] ): lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "image_mean" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "image_std" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_normalize" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_rescale" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "rescale_factor" ) ) 
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_resize" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "size" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_pad" ) ) def snake_case ( self : Union[str, Any] ): lowercase__ : int = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} ) self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE ) lowercase__ : List[str] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} ) self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE ) def snake_case ( self : List[str] ): pass def snake_case ( self : int ): # Initialize image_processing lowercase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input lowercase__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values lowercase__ , lowercase__ : Tuple = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase__ , lowercase__ : List[Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE ) lowercase__ : Tuple = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case ( self : Optional[int] ): 
# Initialize image_processing lowercase__ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , numpify=SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input lowercase__ : Any = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values lowercase__ , lowercase__ : Tuple = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase__ : Tuple = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values lowercase__ , lowercase__ : List[Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case ( self : List[str] ): # Initialize image_processing lowercase__ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , torchify=SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input lowercase__ : Any = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values lowercase__ , lowercase__ : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase__ : List[str] = image_processing(SCREAMING_SNAKE_CASE , 
return_tensors="pt" ).pixel_values lowercase__ , lowercase__ : Tuple = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def snake_case ( self : Tuple ): # prepare image and target lowercase__ : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f: lowercase__ : List[Any] = json.loads(f.read() ) lowercase__ : Optional[int] = {"image_id": 39_769, "annotations": target} # encode them lowercase__ : Any = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50" ) lowercase__ : List[str] = image_processing(images=SCREAMING_SNAKE_CASE , annotations=SCREAMING_SNAKE_CASE , return_tensors="pt" ) # verify pixel values lowercase__ : List[Any] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding["pixel_values"].shape , SCREAMING_SNAKE_CASE ) lowercase__ : Dict = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) ) # verify area lowercase__ : List[str] = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , SCREAMING_SNAKE_CASE ) ) # verify boxes lowercase__ : Tuple = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , SCREAMING_SNAKE_CASE ) lowercase__ : Any = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , SCREAMING_SNAKE_CASE , atol=1E-3 ) ) # verify image_id lowercase__ : Any = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , SCREAMING_SNAKE_CASE ) ) # verify is_crowd lowercase__ : Tuple = 
torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , SCREAMING_SNAKE_CASE ) ) # verify class_labels lowercase__ : Optional[Any] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , SCREAMING_SNAKE_CASE ) ) # verify orig_size lowercase__ : List[Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , SCREAMING_SNAKE_CASE ) ) # verify size lowercase__ : Any = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , SCREAMING_SNAKE_CASE ) ) @slow def snake_case ( self : Optional[int] ): # prepare image, target and masks_path lowercase__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: lowercase__ : Optional[int] = json.loads(f.read() ) lowercase__ : Any = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target} lowercase__ : Tuple = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them lowercase__ : str = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic" ) lowercase__ : Optional[int] = image_processing(images=SCREAMING_SNAKE_CASE , annotations=SCREAMING_SNAKE_CASE , masks_path=SCREAMING_SNAKE_CASE , return_tensors="pt" ) # verify pixel values lowercase__ : List[str] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding["pixel_values"].shape , SCREAMING_SNAKE_CASE ) lowercase__ : Optional[Any] = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) ) # verify area lowercase__ : str = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , SCREAMING_SNAKE_CASE ) ) # verify boxes lowercase__ : 
int = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , SCREAMING_SNAKE_CASE , atol=1E-3 ) ) # verify image_id lowercase__ : List[Any] = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , SCREAMING_SNAKE_CASE ) ) # verify is_crowd lowercase__ : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , SCREAMING_SNAKE_CASE ) ) # verify class_labels lowercase__ : Dict = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , SCREAMING_SNAKE_CASE ) ) # verify masks lowercase__ : List[str] = 822_873 self.assertEqual(encoding["labels"][0]["masks"].sum().item() , SCREAMING_SNAKE_CASE ) # verify orig_size lowercase__ : Union[str, Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , SCREAMING_SNAKE_CASE ) ) # verify size lowercase__ : Tuple = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , SCREAMING_SNAKE_CASE ) )
81
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


class Seq2seqTrainerTester(TestCasePlus):
    """Integration test: fine-tune a tiny bert2bert encoder-decoder with Seq2SeqTrainer.

    The obfuscated original funneled every assignment into one rebound
    variable, leaving `bertabert`, `inputs`, `outputs`, `trainer`, etc.
    undefined, and imported the non-existent `SeqaSeqTrainer`; this restores
    the intended data flow and the real `Seq2SeqTrainer` API names.
    """

    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        """Run a short seq2seq fine-tuning loop on a 1% slice of CNN/DailyMail."""
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        # Generation config: reuse the encoder vocab and BERT's special tokens.
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        # Keep the run fast: 32 train / 16 eval examples.
        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            # Mask pad tokens so they are ignored by the loss (-100).
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels]
                for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # All unnecessary (special) tokens are removed before comparing.
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum(int(pred_str[i] == label_str[i]) for i in range(len(pred_str))) / len(pred_str)
            return {"accuracy": accuracy}

        # Map train dataset to model inputs.
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # Same for validation dataset.
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # Instantiate trainer and run a short training loop.
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )
        trainer.train()
81
1
from __future__ import annotations


# Coulomb's constant k_e (units: N * m^2 * C^-2), CODATA value rounded.
# The obfuscated original bound this value to a throwaway name while the
# function read the undefined `COULOMBS_CONSTANT` (NameError); restored here.
COULOMBS_CONSTANT = 8.988e9


def couloumbs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """
    Solve Coulomb's law, F = k_e * |q1 * q2| / d**2, for the single unknown.

    Exactly one of the four arguments must be 0; it is treated as the unknown
    and its solved value is returned in a one-entry dict keyed by its name.
    (The obfuscated original declared all four parameters with the same name,
    which is a SyntaxError in Python; distinct names are restored.)

    >>> couloumbs_law(0, 3, 5, 2000)
    {'force': 33705.0}

    Args:
        force: magnitude of the electrostatic force in newtons, or 0 if unknown.
        charge1: first charge in coulombs, or 0 if unknown.
        charge2: second charge in coulombs, or 0 if unknown.
        distance: separation in metres, or 0 if unknown.

    Returns:
        A dict mapping the unknown's name to its solved value.

    Raises:
        ValueError: if the count of zero arguments is not exactly one, or if
            ``distance`` is negative.
    """
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")


# Backward-compatible alias for the machine-mangled original name.
__lowerCamelCase = couloumbs_law


if __name__ == "__main__":
    import doctest

    doctest.testmod()
81
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger(__name__) def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" lowercase__ : List[str] = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: lowercase__ : Tuple = 192 lowercase__ : List[Any] = 768 lowercase__ : Tuple = 12 lowercase__ : List[str] = 3 lowercase__ : List[Any] = [800, 1_333] lowercase__ : Union[str, Any] = False elif yolos_name == "yolos_s_dWr": lowercase__ : str = 330 lowercase__ : List[Any] = 14 lowercase__ : Tuple = 6 lowercase__ : Optional[int] = 1_320 elif "yolos_s" in yolos_name: lowercase__ : Dict = 384 lowercase__ : str = 1_536 lowercase__ : List[Any] = 12 lowercase__ : List[Any] = 6 elif "yolos_b" in yolos_name: lowercase__ : int = [800, 1_344] lowercase__ : Tuple = 91 lowercase__ : Optional[int] = "huggingface/label-files" lowercase__ : Optional[int] = "coco-detection-id2label.json" lowercase__ : Any = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) ) lowercase__ : Optional[int] = {int(lowerCamelCase__ ): v for k, v in idalabel.items()} lowercase__ : List[Any] = idalabel lowercase__ : Optional[Any] = {v: k for k, v in idalabel.items()} return config def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ): """simple docstring""" for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowercase__ : Any = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" ) lowercase__ : Any = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict lowercase__ : Union[str, Any] = 
in_proj_weight[: config.hidden_size, :] lowercase__ : Union[str, Any] = in_proj_bias[: config.hidden_size] lowercase__ : Dict = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase__ : Any = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowercase__ : str = in_proj_weight[-config.hidden_size :, :] lowercase__ : Tuple = in_proj_bias[-config.hidden_size :] def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" if "backbone" in name: lowercase__ : Union[str, Any] = name.replace("backbone" , "vit" ) if "cls_token" in name: lowercase__ : List[str] = name.replace("cls_token" , "embeddings.cls_token" ) if "det_token" in name: lowercase__ : List[str] = name.replace("det_token" , "embeddings.detection_tokens" ) if "mid_pos_embed" in name: lowercase__ : List[Any] = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" ) if "pos_embed" in name: lowercase__ : Dict = name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: lowercase__ : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "blocks" in name: lowercase__ : int = name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: lowercase__ : Optional[Any] = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: lowercase__ : Optional[int] = name.replace("attn" , "attention.self" ) if "norm1" in name: lowercase__ : int = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: lowercase__ : int = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: lowercase__ : List[str] = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: lowercase__ : Union[str, Any] = name.replace("mlp.fc2" , "output.dense" ) if "class_embed" in name: lowercase__ : int = name.replace("class_embed" , "class_labels_classifier" ) if "bbox_embed" in name: lowercase__ : Optional[int] = name.replace("bbox_embed" , "bbox_predictor" ) if "vit.norm" in name: lowercase__ : 
Optional[Any] = name.replace("vit.norm" , "vit.layernorm" ) return name def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" for key in orig_state_dict.copy().keys(): lowercase__ : List[Any] = orig_state_dict.pop(lowerCamelCase__ ) if "qkv" in key: lowercase__ : Dict = key.split("." ) lowercase__ : List[Any] = int(key_split[2] ) lowercase__ : Optional[int] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: lowercase__ : str = val[:dim, :] lowercase__ : int = val[ dim : dim * 2, : ] lowercase__ : str = val[-dim:, :] else: lowercase__ : Tuple = val[:dim] lowercase__ : Any = val[dim : dim * 2] lowercase__ : Optional[Any] = val[-dim:] else: lowercase__ : Optional[Any] = val return orig_state_dict def __lowerCamelCase ( ): """simple docstring""" lowercase__ : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg" lowercase__ : List[str] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ) return im @torch.no_grad() def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ): """simple docstring""" lowercase__ : List[Any] = get_yolos_config(lowerCamelCase__ ) # load original state_dict lowercase__ : Dict = torch.load(lowerCamelCase__ , map_location="cpu" )["model"] # load 🤗 model lowercase__ : Dict = YolosForObjectDetection(lowerCamelCase__ ) model.eval() lowercase__ : int = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ ) model.load_state_dict(lowerCamelCase__ ) # Check outputs on an image, prepared by YolosImageProcessor lowercase__ : Dict = 800 if yolos_name != "yolos_ti" else 512 lowercase__ : Optional[Any] = YolosImageProcessor(format="coco_detection" , size=lowerCamelCase__ ) lowercase__ : int = image_processor(images=prepare_img() , return_tensors="pt" ) lowercase__ : int = model(**lowerCamelCase__ ) lowercase__ , lowercase__ : int = outputs.logits, outputs.pred_boxes lowercase__ , lowercase__ : int = None, 
None if yolos_name == "yolos_ti": lowercase__ : Optional[int] = torch.tensor( [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] ) lowercase__ : Dict = torch.tensor( [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] ) elif yolos_name == "yolos_s_200_pre": lowercase__ : Any = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] ) lowercase__ : List[str] = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] ) elif yolos_name == "yolos_s_300_pre": lowercase__ : Dict = torch.tensor( [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] ) lowercase__ : Tuple = torch.tensor( [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] ) elif yolos_name == "yolos_s_dWr": lowercase__ : Optional[Any] = torch.tensor( [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] ) lowercase__ : int = torch.tensor( [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] ) elif yolos_name == "yolos_base": lowercase__ : List[str] = torch.tensor( [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] ) lowercase__ : List[str] = torch.tensor( [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] ) else: raise ValueError(F"""Unknown yolos_name: {yolos_name}""" ) assert torch.allclose(logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(lowerCamelCase__ ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(lowerCamelCase__ ) if push_to_hub: lowercase__ : Tuple = 
{ "yolos_ti": "yolos-tiny", "yolos_s_200_pre": "yolos-small", "yolos_s_300_pre": "yolos-small-300", "yolos_s_dWr": "yolos-small-dwr", "yolos_base": "yolos-base", } print("Pushing to the hub..." ) lowercase__ : Optional[int] = model_mapping[yolos_name] image_processor.push_to_hub(lowerCamelCase__ , organization="hustvl" ) model.push_to_hub(lowerCamelCase__ , organization="hustvl" ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--yolos_name''', default='''yolos_s_200_pre''', type=str, help=( '''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',''' ''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.''' ), ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) lowerCAmelCase__ = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
81
1
import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class snake_case__(unittest.TestCase ): """simple docstring""" def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Dict[str, int] = None , SCREAMING_SNAKE_CASE : int = 32 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 255 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = [0.48_145_466, 0.4_578_275, 0.40_821_073] , SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = [0.26_862_954, 0.26_130_258, 0.27_577_711] , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : List[str]=7 , SCREAMING_SNAKE_CASE : Union[str, Any]=30 , SCREAMING_SNAKE_CASE : Tuple=400 , SCREAMING_SNAKE_CASE : List[Any]=3 , ): lowercase__ : Optional[Any] = parent lowercase__ : Any = do_resize lowercase__ : str = size if size is not None else {"shortest_edge": 288} lowercase__ : Any = size_divisor lowercase__ : str = do_rescale lowercase__ : Any = rescale_factor lowercase__ : Any = do_normalize lowercase__ : Union[str, Any] = do_center_crop lowercase__ : Tuple = image_mean lowercase__ : Any = image_std lowercase__ : Tuple = do_pad lowercase__ : Tuple = batch_size lowercase__ : str = num_channels lowercase__ : List[Any] = min_resolution lowercase__ : str = max_resolution def snake_case ( self : List[str] ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, 
"size": self.size, "size_divisor": self.size_divisor, } def snake_case ( self : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[Any]=False ): if not batched: lowercase__ : int = self.size["shortest_edge"] lowercase__ : Optional[Any] = image_inputs[0] if isinstance(SCREAMING_SNAKE_CASE , Image.Image ): lowercase__ , lowercase__ : Union[str, Any] = image.size else: lowercase__ , lowercase__ : int = image.shape[1], image.shape[2] lowercase__ : Any = size / min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if h < w: lowercase__ , lowercase__ : Dict = size, scale * w else: lowercase__ , lowercase__ : Tuple = scale * h, size lowercase__ : Tuple = int((1_333 / 800) * size ) if max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) > max_size: lowercase__ : Tuple = max_size / max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowercase__ : Union[str, Any] = newh * scale lowercase__ : List[str] = neww * scale lowercase__ , lowercase__ : str = int(newh + 0.5 ), int(neww + 0.5 ) lowercase__ , lowercase__ : str = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: lowercase__ : List[Any] = [] for image in image_inputs: lowercase__ , lowercase__ : Optional[Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowercase__ : List[str] = max(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : item[0] )[0] lowercase__ : int = max(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class snake_case__(_UpperCamelCase , unittest.TestCase ): """simple docstring""" lowercase_ = BridgeTowerImageProcessor if is_vision_available() else None def snake_case ( self : Dict ): lowercase__ : int = BridgeTowerImageProcessingTester(self ) @property def snake_case ( self : Union[str, Any] ): return self.image_processor_tester.prepare_image_processor_dict() def snake_case ( self : Dict ): 
lowercase__ : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "image_mean" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "image_std" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_normalize" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_resize" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "size" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "size_divisor" ) ) def snake_case ( self : Optional[int] ): pass def snake_case ( self : List[Any] ): # Initialize image processor lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input lowercase__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values lowercase__ , lowercase__ : str = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase__ : List[Any] = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values lowercase__ , lowercase__ : Tuple = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case ( self : int ): # Initialize image processor lowercase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , numpify=SCREAMING_SNAKE_CASE ) for image in 
image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input lowercase__ : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values lowercase__ , lowercase__ : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase__ : int = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values lowercase__ , lowercase__ : List[Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case ( self : Optional[int] ): # Initialize image processor lowercase__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , torchify=SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input lowercase__ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values lowercase__ , lowercase__ : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase__ : List[Any] = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values lowercase__ , lowercase__ : Tuple = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, 
expected_height, expected_width, ) , )
81
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ = { '''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''], '''processing_mgp_str''': ['''MgpstrProcessor'''], '''tokenization_mgp_str''': ['''MgpstrTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MgpstrModel''', '''MgpstrPreTrainedModel''', '''MgpstrForSceneTextRecognition''', ] if TYPE_CHECKING: from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mgp_str import ( MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
81
1
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowercase__ : str = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff) # formula for sum of series return total def __lowerCamelCase ( ): """simple docstring""" print(sum_of_series(1 , 1 , 10 ) ) if __name__ == "__main__": import doctest doctest.testmod()
81
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class snake_case__(unittest.TestCase ): """simple docstring""" def snake_case ( self : Optional[Any] ): lowercase__ : Dict = tempfile.mkdtemp() # fmt: off lowercase__ : Any = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on lowercase__ : Dict = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) ) lowercase__ : Tuple = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] lowercase__ : Tuple = {"unk_token": "<unk>"} lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(SCREAMING_SNAKE_CASE ) ) lowercase__ : Tuple = { "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073], "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711], } lowercase__ : Optional[Any] = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Union[str, Any] ): return 
CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ) def snake_case ( self : Optional[int] , **SCREAMING_SNAKE_CASE : Union[str, Any] ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ) def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Dict ): return CLIPImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ) def snake_case ( self : Union[str, Any] ): shutil.rmtree(self.tmpdirname ) def snake_case ( self : Any ): lowercase__ : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowercase__ : str = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs] return image_inputs def snake_case ( self : int ): lowercase__ : Optional[int] = self.get_tokenizer() lowercase__ : List[Any] = self.get_rust_tokenizer() lowercase__ : List[str] = self.get_image_processor() lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) processor_slow.save_pretrained(self.tmpdirname ) lowercase__ : Dict = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE ) lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) processor_fast.save_pretrained(self.tmpdirname ) lowercase__ : Tuple = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE ) self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) 
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE ) self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE ) def snake_case ( self : List[str] ): lowercase__ : Any = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowercase__ : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) lowercase__ : int = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 ) lowercase__ : Union[str, Any] = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE ) def snake_case ( self : str ): lowercase__ : int = self.get_image_processor() lowercase__ : Optional[Any] = self.get_tokenizer() lowercase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) lowercase__ : Any = self.prepare_image_inputs() lowercase__ : List[Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="np" ) lowercase__ : Optional[int] = processor(images=SCREAMING_SNAKE_CASE , return_tensors="np" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def snake_case ( self : str ): lowercase__ : Tuple = self.get_image_processor() lowercase__ : Any = self.get_tokenizer() lowercase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) lowercase__ : int = "lower newer" lowercase__ : Dict = processor(text=SCREAMING_SNAKE_CASE ) lowercase__ : int = 
tokenizer(SCREAMING_SNAKE_CASE ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def snake_case ( self : Union[str, Any] ): lowercase__ : Optional[int] = self.get_image_processor() lowercase__ : Tuple = self.get_tokenizer() lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) lowercase__ : List[Any] = "lower newer" lowercase__ : str = self.prepare_image_inputs() lowercase__ : int = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE ): processor() def snake_case ( self : Optional[Any] ): lowercase__ : Dict = self.get_image_processor() lowercase__ : Optional[Any] = self.get_tokenizer() lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) lowercase__ : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase__ : Any = processor.batch_decode(SCREAMING_SNAKE_CASE ) lowercase__ : Any = tokenizer.batch_decode(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def snake_case ( self : str ): lowercase__ : List[str] = self.get_image_processor() lowercase__ : List[str] = self.get_tokenizer() lowercase__ : Union[str, Any] = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) lowercase__ : Any = "lower newer" lowercase__ : Union[str, Any] = self.prepare_image_inputs() lowercase__ : int = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
81
1
# NOTE(review): bitsandbytes (bnb) quantization utilities, accelerate-style.
# This chunk went through an automated identifier-mangling pass: every local
# assignment target became `lowercase__` while later statements still read the
# original names (model, device_map, module, ...), and distinct symbols were
# collapsed — e.g. the 4-bit and 8-bit availability checks both became
# `is_abit_bnb_available` (imported twice below), and `load_in_4bit` /
# `load_in_8bit` both became `load_in_abit`. Code tokens are preserved
# unchanged; only comments were added.
import logging
import os
from typing import Dict, List, Optional, Union

import torch
import torch.nn as nn

from accelerate.utils.imports import (
    # NOTE(review): duplicated name — originally the distinct 4-bit and 8-bit
    # availability checks; the real names cannot be recovered from this chunk.
    is_abit_bnb_available,
    is_abit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
    find_tied_parameters,
    get_balanced_memory,
    infer_auto_device_map,
    load_checkpoint_in_model,
    offload_weight,
    set_module_tensor_to_device,
)

if is_bnb_available():
    import bitsandbytes as bnb

from copy import deepcopy

# module-level logger (mangled name; used below as `logger`)
lowerCAmelCase__ = logging.getLogger(__name__)


def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = False , ):
    # Load-and-quantize entry point: swaps nn.Linear layers for bitsandbytes
    # variants, then either quantizes an already-materialized model in place
    # (non-meta device) or loads checkpoint weights into an empty quantized
    # skeleton and dispatches it across the device map.
    """simple docstring"""
    lowercase__ : Union[str, Any] = bnb_quantization_config.load_in_abit
    lowercase__ : List[str] = bnb_quantization_config.load_in_abit
    # Availability guards for the requested quantization mode.
    if load_in_abit and not is_abit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed." )
    if load_in_abit and not is_abit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed." )
    lowercase__ : Optional[Any] = []
    # custom device map
    if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(device_map.keys() ) > 1:
        lowercase__ : str = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        lowercase__ : Union[str, Any] = get_keys_to_not_convert(lowerCamelCase__ )
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_abit:
        bnb_quantization_config.skip_modules.extend(lowerCamelCase__ )
    lowercase__ : List[str] = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fpaa_modules is None:
        lowercase__ : List[str] = []
    lowercase__ : Optional[Any] = bnb_quantization_config.keep_in_fpaa_modules
    modules_to_not_convert.extend(lowerCamelCase__ )
    # compatibility with peft
    lowercase__ : List[str] = load_in_abit
    lowercase__ : Optional[int] = load_in_abit
    lowercase__ : Optional[int] = get_parameter_device(lowerCamelCase__ )
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager." )
        lowercase__ : Tuple = replace_with_bnb_layers(lowerCamelCase__ , lowerCamelCase__ , modules_to_not_convert=lowerCamelCase__ )
        # convert param to the right dtype
        lowercase__ : Dict = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
                param.to(torch.floataa )
                if param.dtype != torch.floataa:
                    # strip ".weight"/".bias" to look the parameter's owner up by name
                    lowercase__ : List[Any] = name.replace(".weight" , "" ).replace(".bias" , "" )
                    lowercase__ : Union[str, Any] = getattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
                    if param is not None:
                        param.to(torch.floataa )
            elif torch.is_floating_point(lowerCamelCase__ ):
                param.to(lowerCamelCase__ )
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device() )
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device() )
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization." )
        logger.info(
            F"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
            "We move the model to cuda." )
        return model
    elif weights_location is None:
        raise RuntimeError(
            F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
    else:
        # Meta-device model: build the quantized skeleton, compute a device
        # map, then stream checkpoint weights in and dispatch.
        with init_empty_weights():
            lowercase__ : int = replace_with_bnb_layers(
                lowerCamelCase__ , lowerCamelCase__ , modules_to_not_convert=lowerCamelCase__ )
        lowercase__ : Any = get_quantized_model_device_map(
            lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , max_memory=lowerCamelCase__ , no_split_module_classes=lowerCamelCase__ , )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            lowercase__ : Dict = True
        lowercase__ : Tuple = any(x in list(device_map.values() ) for x in ["cpu", "disk"] )
        load_checkpoint_in_model(
            lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=lowerCamelCase__ , offload_state_dict=lowerCamelCase__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
        return dispatch_model(lowerCamelCase__ , device_map=lowerCamelCase__ , offload_dir=lowerCamelCase__ )


def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None ):
    # get_quantized_model_device_map: resolves a string/None device_map into a
    # concrete mapping via infer_auto_device_map, forcing skip/keep modules to
    # their full-precision dtypes and rejecting CPU/disk placement for 4-bit.
    """simple docstring"""
    if device_map is None:
        if torch.cuda.is_available():
            lowercase__ : Tuple = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization." )
        logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`." )
    if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'." )
        lowercase__ : List[Any] = {}
        # Modules we skip stay in the config's torch_dtype...
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules )
            } )
        # ...and keep-in-fp modules are pinned to the float dtype below.
        special_dtypes.update(
            {
                name: torch.floataa
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
            } )
        lowercase__ : Optional[Any] = {}
        lowercase__ : Union[str, Any] = special_dtypes
        lowercase__ : Tuple = no_split_module_classes
        lowercase__ : Optional[Any] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            lowercase__ : Any = get_balanced_memory(
                lowerCamelCase__ , low_zero=(device_map == "balanced_low_0") , max_memory=lowerCamelCase__ , **lowerCamelCase__ , )
        lowercase__ : List[str] = max_memory
        lowercase__ : str = infer_auto_device_map(lowerCamelCase__ , **lowerCamelCase__ )
    if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
        # check if don't have any quantized module on the cpu
        lowercase__ : Optional[Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
        lowercase__ : Dict = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_abit:
                    raise ValueError(
                        "\n                    Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n                    the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n                    these modules in `torch_dtype`, you need to pass a custom `device_map` to\n                    `load_and_quantize_model`. Check\n                    https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n                    for more details.\n                    " )
                else:
                    logger.info(
                        "Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit" )
        del device_map_without_some_modules
    return device_map


def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None ):
    # replace_with_bnb_layers: public wrapper around the recursive
    # _replace_with_bnb_layers below; warns if nothing was replaced.
    """simple docstring"""
    if modules_to_not_convert is None:
        lowercase__ : int = []
    lowercase__ , lowercase__ : str = _replace_with_bnb_layers(
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug." )
    return model


def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , ):
    # _replace_with_bnb_layers: depth-first recursion over named children,
    # swapping eligible nn.Linear modules for bnb 8-bit/4-bit linears and
    # copying weight/bias data across; returns (model, has_been_replaced).
    """simple docstring"""
    lowercase__ : Tuple = False
    for name, module in model.named_children():
        if current_key_name is None:
            lowercase__ : List[Any] = []
        current_key_name.append(lowerCamelCase__ )
        if isinstance(lowerCamelCase__ , nn.Linear ) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            lowercase__ : Optional[Any] = ".".join(lowerCamelCase__ )
            lowercase__ : Optional[int] = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    lowercase__ : Optional[int] = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_abit:
                    lowercase__ : Any = bnb.nn.LinearabitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=lowerCamelCase__ , threshold=bnb_quantization_config.llm_inta_threshold , )
                elif bnb_quantization_config.load_in_abit:
                    lowercase__ : Union[str, Any] = bnb.nn.Linearabit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False" )
                lowercase__ : Union[str, Any] = module.weight.data
                if module.bias is not None:
                    lowercase__ : Optional[int] = module.bias.data
                bnb_module.requires_grad_(lowerCamelCase__ )
                setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
                lowercase__ : Tuple = True
        if len(list(module.children() ) ) > 0:
            lowercase__ , lowercase__ : List[str] = _replace_with_bnb_layers(
                lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
            lowercase__ : List[Any] = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced


def __lowerCamelCase ( lowerCamelCase__ ):
    # get_keys_to_not_convert: finds module names (output head, tied weights)
    # that should be kept out of quantization; returns them with
    # ".weight"/".bias" suffixes stripped.
    """simple docstring"""
    with init_empty_weights():
        lowercase__ : List[Any] = deepcopy(lowerCamelCase__ )  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    lowercase__ : Optional[int] = find_tied_parameters(lowerCamelCase__ )
    # For compatibility with Accelerate < 0.18
    if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
        lowercase__ : Optional[int] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        lowercase__ : Dict = sum(lowerCamelCase__ , [] )
    lowercase__ : int = len(lowerCamelCase__ ) > 0
    # Check if it is a base model
    lowercase__ : Tuple = False
    if hasattr(lowerCamelCase__ , "base_model_prefix" ):
        lowercase__ : int = not hasattr(lowerCamelCase__ , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    lowercase__ : List[str] = list(model.named_children() )
    lowercase__ : Dict = [list_modules[-1][0]]
    # add last module together with tied weights
    lowercase__ : Optional[int] = set(lowerCamelCase__ ) - set(lowerCamelCase__ )
    lowercase__ : Dict = list(set(lowerCamelCase__ ) ) + list(lowerCamelCase__ )
    # remove ".weight" from the keys
    lowercase__ : int = [".weight", ".bias"]
    lowercase__ : int = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                lowercase__ : Tuple = name.replace(lowerCamelCase__ , "" )
        filtered_module_names.append(lowerCamelCase__ )
    return filtered_module_names


def __lowerCamelCase ( lowerCamelCase__ ):
    # True if any submodule is already a bnb quantized linear layer.
    """simple docstring"""
    for m in model.modules():
        if isinstance(lowerCamelCase__ , bnb.nn.Linearabit ):
            return True
    return False


def __lowerCamelCase ( lowerCamelCase__ ):
    # get_parameter_device: device of the model's first parameter.
    """simple docstring"""
    return next(parameter.parameters() ).device


def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    # quantize_and_offload: materialize a parameter on device 0 to quantize it
    # (when no precomputed fp stats are given), offload the quantized weight —
    # and its SCB scale buffer if present — to disk/CPU, then park the
    # parameter back on the meta device.
    """simple docstring"""
    if fpaa_statistics is None:
        set_module_tensor_to_device(lowerCamelCase__ , lowerCamelCase__ , 0 , dtype=lowerCamelCase__ , value=lowerCamelCase__ )
        lowercase__ : Any = param_name
        lowercase__ : Dict = model
        # Walk the dotted path down to the parameter's owning module.
        if "." in tensor_name:
            lowercase__ : Any = tensor_name.split("." )
            for split in splits[:-1]:
                lowercase__ : Optional[int] = getattr(lowerCamelCase__ , lowerCamelCase__ )
                if new_module is None:
                    raise ValueError(F"""{module} has no attribute {split}.""" )
                lowercase__ : List[Any] = new_module
            lowercase__ : str = splits[-1]
        # offload weights
        lowercase__ : Any = False
        offload_weight(module._parameters[tensor_name] , lowerCamelCase__ , lowerCamelCase__ , index=lowerCamelCase__ )
        if hasattr(module._parameters[tensor_name] , "SCB" ):
            offload_weight(
                module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB" ) , lowerCamelCase__ , index=lowerCamelCase__ , )
    else:
        offload_weight(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , index=lowerCamelCase__ )
        offload_weight(lowerCamelCase__ , param_name.replace("weight" , "SCB" ) , lowerCamelCase__ , index=lowerCamelCase__ )
    set_module_tensor_to_device(lowerCamelCase__ , lowerCamelCase__ , "meta" , dtype=lowerCamelCase__ , value=torch.empty(*param.size() ) )
81
# NOTE(review): TextStreamer / TextIteratorStreamer integration tests
# (transformers-style). Identifiers were mangled by an automated pass:
# assignments bind `lowercase__` while later statements read the original
# names (`tokenizer`, `model`, `streamer`, ...), so this code is not runnable
# as-is. Code tokens are preserved unchanged; only comments were added.
import unittest
from queue import Empty
from threading import Thread

from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device

from ..test_modeling_common import ids_tensor

if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM


@require_torch
class snake_case__(unittest.TestCase ):
    """simple docstring"""

    def snake_case ( self : int ):
        # TextStreamer on stdout must reproduce the greedy-decoded text
        # (minus the streamer's trailing newline).
        lowercase__ : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        lowercase__ : Dict = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : str = -1
        lowercase__ : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
        lowercase__ : Dict = tokenizer.decode(greedy_ids[0] )
        with CaptureStdout() as cs:
            lowercase__ : str = TextStreamer(SCREAMING_SNAKE_CASE )
            model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        lowercase__ : int = cs.out[:-1]
        self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    def snake_case ( self : Optional[int] ):
        # TextIteratorStreamer consumed from another thread must yield the
        # same text as a plain greedy generate.
        lowercase__ : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        lowercase__ : str = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[Any] = -1
        lowercase__ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[int] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
        lowercase__ : int = tokenizer.decode(greedy_ids[0] )
        lowercase__ : Union[str, Any] = TextIteratorStreamer(SCREAMING_SNAKE_CASE )
        lowercase__ : Dict = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        lowercase__ : Optional[int] = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE )
        thread.start()
        lowercase__ : List[Any] = ""
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    def snake_case ( self : Union[str, Any] ):
        # skip_prompt=True: only the newly generated suffix (after the prompt)
        # should be streamed to stdout.
        lowercase__ : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        lowercase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : Union[str, Any] = -1
        lowercase__ : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
        lowercase__ : Any = greedy_ids[:, input_ids.shape[1] :]
        lowercase__ : Any = tokenizer.decode(new_greedy_ids[0] )
        with CaptureStdout() as cs:
            lowercase__ : str = TextStreamer(SCREAMING_SNAKE_CASE , skip_prompt=SCREAMING_SNAKE_CASE )
            model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        lowercase__ : Optional[Any] = cs.out[:-1]
        self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    def snake_case ( self : Any ):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        lowercase__ : List[str] = AutoTokenizer.from_pretrained("distilgpt2" )
        lowercase__ : Tuple = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : List[Any] = -1
        lowercase__ : List[Any] = torch.ones((1, 5) , device=SCREAMING_SNAKE_CASE ).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            lowercase__ : Dict = TextStreamer(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
            model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=1 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        lowercase__ : List[Any] = cs.out[:-1]  # Remove the final "\n"
        lowercase__ : Optional[int] = tokenizer(SCREAMING_SNAKE_CASE , return_tensors="pt" )
        self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )

    def snake_case ( self : Optional[int] ):
        # A tiny iterator timeout must surface as an exception while the
        # generation thread is still running.
        lowercase__ : Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        lowercase__ : List[str] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : int = -1
        lowercase__ : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : List[Any] = TextIteratorStreamer(SCREAMING_SNAKE_CASE , timeout=0.001 )
        lowercase__ : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        lowercase__ : Any = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE )
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(SCREAMING_SNAKE_CASE ):
            lowercase__ : List[str] = ""
            for new_text in streamer:
                streamer_text += new_text
81
1
# NOTE(review): one-shot conversion script: TensorFlow XLNet checkpoint ->
# PyTorch model + config. Identifiers were mangled by an automated pass
# (assignments bind `lowercase__`/`lowerCAmelCase__` while later statements
# read `config`, `model`, `parser`, `args`, and the dict is used as
# `GLUE_TASKS_NUM_LABELS`), so this code is not runnable as-is. Code tokens
# are preserved unchanged; only comments were added.
import argparse
import os

import torch

from transformers import (
    XLNetConfig,
    XLNetForQuestionAnswering,
    XLNetForSequenceClassification,
    XLNetLMHeadModel,
    load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging

# GLUE task name -> number of classification labels (read below as
# GLUE_TASKS_NUM_LABELS).
lowerCAmelCase__ = {
    '''cola''': 2,
    '''mnli''': 3,
    '''mrpc''': 2,
    '''sst-2''': 2,
    '''sts-b''': 1,
    '''qqp''': 2,
    '''qnli''': 2,
    '''rte''': 2,
    '''wnli''': 2,
}

logging.set_verbosity_info()


def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ):
    # Build the appropriate XLNet head from the finetuning task (sequence
    # classification for GLUE tasks, QA for SQuAD, LM head otherwise), load
    # the TF weights into it, then write weights + config into the dump folder.
    """simple docstring"""
    lowercase__ : List[str] = XLNetConfig.from_json_file(lowerCamelCase__ )
    lowercase__ : Optional[int] = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
        lowercase__ : Any = finetuning_task
        lowercase__ : Optional[int] = GLUE_TASKS_NUM_LABELS[finetuning_task]
        lowercase__ : str = XLNetForSequenceClassification(lowerCamelCase__ )
    elif "squad" in finetuning_task:
        lowercase__ : Optional[Any] = finetuning_task
        lowercase__ : Tuple = XLNetForQuestionAnswering(lowerCamelCase__ )
    else:
        lowercase__ : int = XLNetLMHeadModel(lowerCamelCase__ )
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
    # Save pytorch-model
    lowercase__ : Optional[int] = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
    lowercase__ : int = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
    print(F"""Save PyTorch model to {os.path.abspath(lowerCamelCase__ )}""" )
    torch.save(model.state_dict() , lowerCamelCase__ )
    print(F"""Save configuration file to {os.path.abspath(lowerCamelCase__ )}""" )
    with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )


if __name__ == "__main__":
    # CLI wrapper: parse the four script arguments and run the conversion.
    lowerCAmelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--xlnet_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained XLNet model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default=None,
        type=str,
        required=True,
        help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
    )
    parser.add_argument(
        '''--finetuning_task''',
        default=None,
        type=str,
        help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
    )
    lowerCAmelCase__ = parser.parse_args()
    print(args)
    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
81
# NOTE(review): diffusers-style VAE building blocks: an output dataclass, an
# Encoder, a Decoder, a VectorQuantizer, and a diagonal Gaussian posterior.
# All four classes were renamed to `snake_case__` by an automated mangling
# pass, and local assignments bind `lowercase__` while later statements read
# the original names (`sample`, `z_q`, `module`, ...), so this code is not
# runnable as-is. Code tokens are preserved unchanged; only comments were
# added.
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block


@dataclass
class snake_case__(_UpperCamelCase ):
    # Output container (mangled); the single field holds the decoded sample.
    """simple docstring"""
    lowercase_ = 42


class snake_case__(nn.Module ):
    # Encoder: conv_in -> down blocks -> mid block -> norm/act/conv_out,
    # optionally doubling the output channels (mean+logvar) when double_z.
    """simple docstring"""

    def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : Optional[int]=3 , SCREAMING_SNAKE_CASE : List[Any]=("DownEncoderBlock2D",) , SCREAMING_SNAKE_CASE : Dict=(64,) , SCREAMING_SNAKE_CASE : Optional[Any]=2 , SCREAMING_SNAKE_CASE : Optional[int]=32 , SCREAMING_SNAKE_CASE : List[str]="silu" , SCREAMING_SNAKE_CASE : str=True , ):
        super().__init__()
        lowercase__ : str = layers_per_block
        lowercase__ : int = torch.nn.Convad(
            SCREAMING_SNAKE_CASE , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
        lowercase__ : Union[str, Any] = None
        lowercase__ : Optional[int] = nn.ModuleList([] )
        # down
        lowercase__ : Dict = block_out_channels[0]
        for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE ):
            lowercase__ : List[str] = output_channel
            lowercase__ : Dict = block_out_channels[i]
            lowercase__ : List[str] = i == len(SCREAMING_SNAKE_CASE ) - 1
            lowercase__ : Union[str, Any] = get_down_block(
                SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block , in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=SCREAMING_SNAKE_CASE , resnet_groups=SCREAMING_SNAKE_CASE , attention_head_dim=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , )
            self.down_blocks.append(SCREAMING_SNAKE_CASE )
        # mid
        lowercase__ : Optional[int] = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , )
        # out
        lowercase__ : int = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=SCREAMING_SNAKE_CASE , eps=1E-6 )
        lowercase__ : Union[str, Any] = nn.SiLU()
        # double_z doubles channels so the latent can carry mean and logvar
        lowercase__ : Tuple = 2 * out_channels if double_z else out_channels
        lowercase__ : Tuple = nn.Convad(block_out_channels[-1] , SCREAMING_SNAKE_CASE , 3 , padding=1 )
        lowercase__ : Tuple = False

    def snake_case ( self : int , SCREAMING_SNAKE_CASE : Tuple ):
        # Forward pass; uses torch.utils.checkpoint during training when
        # gradient checkpointing is enabled (reentrant flag depends on torch
        # version).
        lowercase__ : List[str] = x
        lowercase__ : Tuple = self.conv_in(SCREAMING_SNAKE_CASE )
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(SCREAMING_SNAKE_CASE : Union[str, Any] ):
                def custom_forward(*SCREAMING_SNAKE_CASE : Dict ):
                    return module(*SCREAMING_SNAKE_CASE )

                return custom_forward

            # down
            if is_torch_version(">=" , "1.11.0" ):
                for down_block in self.down_blocks:
                    lowercase__ : Union[str, Any] = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
                # middle
                lowercase__ : int = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
            else:
                for down_block in self.down_blocks:
                    lowercase__ : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
                # middle
                lowercase__ : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE )
        else:
            # down
            for down_block in self.down_blocks:
                lowercase__ : Any = down_block(SCREAMING_SNAKE_CASE )
            # middle
            lowercase__ : List[str] = self.mid_block(SCREAMING_SNAKE_CASE )
        # post-process
        lowercase__ : Union[str, Any] = self.conv_norm_out(SCREAMING_SNAKE_CASE )
        lowercase__ : List[Any] = self.conv_act(SCREAMING_SNAKE_CASE )
        lowercase__ : Any = self.conv_out(SCREAMING_SNAKE_CASE )
        return sample


class snake_case__(nn.Module ):
    # Decoder: conv_in -> mid block -> up blocks -> norm/act/conv_out, with
    # optional spatial norm conditioned on latent embeddings.
    """simple docstring"""

    def __init__( self : Dict , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : Optional[int]=("UpDecoderBlock2D",) , SCREAMING_SNAKE_CASE : int=(64,) , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : int=32 , SCREAMING_SNAKE_CASE : str="silu" , SCREAMING_SNAKE_CASE : Any="group" , ):
        super().__init__()
        lowercase__ : List[str] = layers_per_block
        lowercase__ : int = nn.Convad(
            SCREAMING_SNAKE_CASE , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
        lowercase__ : Optional[Any] = None
        lowercase__ : Dict = nn.ModuleList([] )
        # temb channels only used for the "spatial" norm variant
        lowercase__ : List[str] = in_channels if norm_type == "spatial" else None
        # mid
        lowercase__ : str = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , )
        # up
        lowercase__ : Tuple = list(reversed(SCREAMING_SNAKE_CASE ) )
        lowercase__ : Dict = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE ):
            lowercase__ : Tuple = output_channel
            lowercase__ : List[Any] = reversed_block_out_channels[i]
            lowercase__ : List[Any] = i == len(SCREAMING_SNAKE_CASE ) - 1
            lowercase__ : Dict = get_up_block(
                SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , prev_output_channel=SCREAMING_SNAKE_CASE , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , resnet_groups=SCREAMING_SNAKE_CASE , attention_head_dim=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , resnet_time_scale_shift=SCREAMING_SNAKE_CASE , )
            self.up_blocks.append(SCREAMING_SNAKE_CASE )
            lowercase__ : Optional[Any] = output_channel
        # out
        if norm_type == "spatial":
            lowercase__ : Any = SpatialNorm(block_out_channels[0] , SCREAMING_SNAKE_CASE )
        else:
            lowercase__ : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=SCREAMING_SNAKE_CASE , eps=1E-6 )
        lowercase__ : Union[str, Any] = nn.SiLU()
        lowercase__ : Any = nn.Convad(block_out_channels[0] , SCREAMING_SNAKE_CASE , 3 , padding=1 )
        lowercase__ : List[Any] = False

    def snake_case ( self : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str=None ):
        # Forward pass; mirrors the Encoder's checkpointing logic and passes
        # the optional latent embeddings to mid/up blocks and the output norm.
        lowercase__ : Tuple = z
        lowercase__ : List[str] = self.conv_in(SCREAMING_SNAKE_CASE )
        lowercase__ : List[Any] = next(iter(self.up_blocks.parameters() ) ).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(SCREAMING_SNAKE_CASE : List[str] ):
                def custom_forward(*SCREAMING_SNAKE_CASE : Optional[int] ):
                    return module(*SCREAMING_SNAKE_CASE )

                return custom_forward

            if is_torch_version(">=" , "1.11.0" ):
                # middle
                lowercase__ : List[str] = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
                lowercase__ : str = sample.to(SCREAMING_SNAKE_CASE )
                # up
                for up_block in self.up_blocks:
                    lowercase__ : List[Any] = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
            else:
                # middle
                lowercase__ : str = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
                lowercase__ : Tuple = sample.to(SCREAMING_SNAKE_CASE )
                # up
                for up_block in self.up_blocks:
                    lowercase__ : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        else:
            # middle
            lowercase__ : Optional[int] = self.mid_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
            lowercase__ : Optional[Any] = sample.to(SCREAMING_SNAKE_CASE )
            # up
            for up_block in self.up_blocks:
                lowercase__ : Optional[Any] = up_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        # post-process
        if latent_embeds is None:
            lowercase__ : Union[str, Any] = self.conv_norm_out(SCREAMING_SNAKE_CASE )
        else:
            lowercase__ : Dict = self.conv_norm_out(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        lowercase__ : Union[str, Any] = self.conv_act(SCREAMING_SNAKE_CASE )
        lowercase__ : Tuple = self.conv_out(SCREAMING_SNAKE_CASE )
        return sample


class snake_case__(nn.Module ):
    # Vector quantizer: nearest-neighbour lookup into a learned codebook,
    # commitment loss, straight-through gradient, and optional index remapping
    # loaded from an .npy file.
    """simple docstring"""

    def __init__( self : Any , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : List[Any]="random" , SCREAMING_SNAKE_CASE : Union[str, Any]=False , SCREAMING_SNAKE_CASE : int=True ):
        super().__init__()
        lowercase__ : List[Any] = n_e
        lowercase__ : List[str] = vq_embed_dim
        lowercase__ : Optional[Any] = beta
        lowercase__ : List[str] = legacy
        lowercase__ : Tuple = nn.Embedding(self.n_e , self.vq_embed_dim )
        self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
        lowercase__ : Union[str, Any] = remap
        if self.remap is not None:
            self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
            lowercase__ : Tuple = self.used.shape[0]
            lowercase__ : Any = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                lowercase__ : Any = self.re_embed
                lowercase__ : Tuple = self.re_embed + 1
            print(
                f"""Remapping {self.n_e} indices to {self.re_embed} indices. """
                f"""Using {self.unknown_index} for unknown indices.""" )
        else:
            lowercase__ : str = n_e
        lowercase__ : Union[str, Any] = sane_index_shape

    def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict ):
        # remap_to_used: map raw codebook indices onto the remapped subset;
        # unknown indices become random or the configured fallback index.
        lowercase__ : Any = inds.shape
        assert len(SCREAMING_SNAKE_CASE ) > 1
        lowercase__ : List[str] = inds.reshape(ishape[0] , -1 )
        lowercase__ : str = self.used.to(SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[int] = (inds[:, :, None] == used[None, None, ...]).long()
        lowercase__ : Dict = match.argmax(-1 )
        lowercase__ : Dict = match.sum(2 ) < 1
        if self.unknown_index == "random":
            lowercase__ : Optional[Any] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
        else:
            lowercase__ : List[Any] = self.unknown_index
        return new.reshape(SCREAMING_SNAKE_CASE )

    def snake_case ( self : int , SCREAMING_SNAKE_CASE : int ):
        # unmap_to_all: inverse of remap_to_used — recover original codebook
        # indices via a gather over the `used` buffer.
        lowercase__ : List[Any] = inds.shape
        assert len(SCREAMING_SNAKE_CASE ) > 1
        lowercase__ : Optional[int] = inds.reshape(ishape[0] , -1 )
        lowercase__ : str = self.used.to(SCREAMING_SNAKE_CASE )
        if self.re_embed > self.used.shape[0]:
            # extra token
            lowercase__ : int = 0  # simply set to zero
        lowercase__ : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , SCREAMING_SNAKE_CASE )
        return back.reshape(SCREAMING_SNAKE_CASE )

    def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : List[Any] ):
        # Forward: quantize z and return (z_q, loss, (perplexity, encodings,
        # indices)); gradients flow straight-through to z.
        # reshape z -> (batch, height, width, channel) and flatten
        lowercase__ : Union[str, Any] = z.permute(0 , 2 , 3 , 1 ).contiguous()
        lowercase__ : Optional[Any] = z.view(-1 , self.vq_embed_dim )
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        lowercase__ : Optional[Any] = torch.argmin(torch.cdist(SCREAMING_SNAKE_CASE , self.embedding.weight ) , dim=1 )
        lowercase__ : List[str] = self.embedding(SCREAMING_SNAKE_CASE ).view(z.shape )
        lowercase__ : Dict = None
        lowercase__ : int = None
        # compute loss for embedding
        if not self.legacy:
            lowercase__ : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
        else:
            lowercase__ : List[str] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
        # preserve gradients
        lowercase__ : Union[str, Any] = z + (z_q - z).detach()
        # reshape back to match original input shape
        lowercase__ : Optional[int] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        if self.remap is not None:
            lowercase__ : Dict = min_encoding_indices.reshape(z.shape[0] , -1 )  # add batch axis
            lowercase__ : int = self.remap_to_used(SCREAMING_SNAKE_CASE )
            lowercase__ : List[str] = min_encoding_indices.reshape(-1 , 1 )  # flatten
        if self.sane_index_shape:
            lowercase__ : List[str] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
        # get_codebook_entry: fetch quantized vectors for given indices and
        # optionally reshape them back to (batch, channel, height, width).
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            lowercase__ : Union[str, Any] = indices.reshape(shape[0] , -1 )  # add batch axis
            lowercase__ : Union[str, Any] = self.unmap_to_all(SCREAMING_SNAKE_CASE )
            lowercase__ : Optional[int] = indices.reshape(-1 )  # flatten again
        # get quantized latent vectors
        lowercase__ : List[Any] = self.embedding(SCREAMING_SNAKE_CASE )
        if shape is not None:
            lowercase__ : Any = z_q.view(SCREAMING_SNAKE_CASE )
            # reshape back to match original input shape
            lowercase__ : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        return z_q


class snake_case__(_UpperCamelCase ):
    # Diagonal Gaussian posterior built by splitting the encoder output into
    # mean and (clamped) logvar; supports sampling, KL, NLL, and mode.
    """simple docstring"""

    def __init__( self : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : str=False ):
        lowercase__ : Dict = parameters
        # channel-split into mean and logvar halves
        lowercase__ , lowercase__ : Optional[int] = torch.chunk(SCREAMING_SNAKE_CASE , 2 , dim=1 )
        # clamp logvar for numerical stability
        lowercase__ : Optional[Any] = torch.clamp(self.logvar , -30.0 , 20.0 )
        lowercase__ : Optional[int] = deterministic
        lowercase__ : Tuple = torch.exp(0.5 * self.logvar )
        lowercase__ : Optional[int] = torch.exp(self.logvar )
        if self.deterministic:
            # deterministic mode: zero variance so sample() returns the mean
            lowercase__ : Any = torch.zeros_like(
                self.mean , device=self.parameters.device , dtype=self.parameters.dtype )

    def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None ):
        # Reparameterized sample: mean + std * eps.
        # make sure sample is on the same device as the parameters and has same dtype
        lowercase__ : Tuple = randn_tensor(
            self.mean.shape , generator=SCREAMING_SNAKE_CASE , device=self.parameters.device , dtype=self.parameters.dtype )
        lowercase__ : str = self.mean + self.std * sample
        return x

    def snake_case ( self : str , SCREAMING_SNAKE_CASE : List[str]=None ):
        # KL divergence to the standard normal (other=None) or to another
        # diagonal Gaussian; zero in deterministic mode.
        if self.deterministic:
            return torch.Tensor([0.0] )
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean , 2 ) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar , dim=[1, 2, 3] , )

    def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict=[1, 2, 3] ):
        # Negative log-likelihood of `sample` under this Gaussian, summed over
        # the given dims; zero in deterministic mode.
        # NOTE(review): mutable default argument ([1, 2, 3]) — preserved to
        # keep tokens unchanged, but a rewrite should use a None sentinel.
        if self.deterministic:
            return torch.Tensor([0.0] )
        lowercase__ : Any = np.log(2.0 * np.pi )
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=SCREAMING_SNAKE_CASE )

    def snake_case ( self : Tuple ):
        # Mode of a Gaussian is its mean.
        return self.mean
81
1
import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, 
load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() lowerCAmelCase__ = { '''bart''': ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), '''bert''': ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''bert-base-cased-finetuned-mrpc''': ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''dpr''': ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), '''gpt2''': ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''xlnet''': ( XLNetConfig, 
TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''xlm''': ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''xlm-roberta''': ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''transfo-xl''': ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''openai-gpt''': ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''roberta''': ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''layoutlm''': ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), '''roberta-large-mnli''': ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''camembert''': ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''flaubert''': ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''distilbert''': ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''distilbert-base-distilled-squad''': ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''lxmert''': ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''lxmert-visual-feature-encoder''': ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''ctrl''': ( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''albert''': ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, 
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''t5''': ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''electra''': ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''wav2vec2''': ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=True ): """simple docstring""" if model_type not in MODEL_CLASSES: raise ValueError(F"""Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.""" ) lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: lowercase__ : Optional[Any] = cached_file(lowerCamelCase__ , lowerCamelCase__ , force_download=not use_cached_models ) lowercase__ : Tuple = config_class.from_json_file(lowerCamelCase__ ) lowercase__ : List[Any] = True lowercase__ : Dict = True print(F"""Building TensorFlow model from configuration: {config}""" ) lowercase__ : Tuple = model_class(lowerCamelCase__ ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): lowercase__ : Union[str, Any] = cached_file( lowerCamelCase__ , lowerCamelCase__ , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: lowercase__ : str = load_pytorch_checkpoint_in_tfa_model(lowerCamelCase__ , lowerCamelCase__ ) if compare_with_pt_model: lowercase__ : int = tf_model(tf_model.dummy_inputs , training=lowerCamelCase__ ) # build the network lowercase__ : Tuple = torch.load(lowerCamelCase__ , map_location="cpu" ) lowercase__ : Optional[Any] = pt_model_class.from_pretrained( pretrained_model_name_or_path=lowerCamelCase__ , config=lowerCamelCase__ , state_dict=lowerCamelCase__ ) with torch.no_grad(): lowercase__ : Any = pt_model(**pt_model.dummy_inputs 
) lowercase__ : int = pto[0].numpy() lowercase__ : List[Any] = tfo[0].numpy() lowercase__ : List[Any] = np.amax(np.abs(np_pt - np_tf ) ) print(F"""Max absolute difference between models outputs {diff}""" ) assert diff <= 2e-2, F"""Error, model absolute difference is >2e-2: {diff}""" # Save pytorch-model print(F"""Save TensorFlow model to {tf_dump_path}""" ) tf_model.save_weights(lowerCamelCase__ , save_format="h5" ) def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=False , ): """simple docstring""" if args_model_type is None: lowercase__ : Tuple = list(MODEL_CLASSES.keys() ) else: lowercase__ : int = [args_model_type] for j, model_type in enumerate(lowerCamelCase__ , start=1 ): print("=" * 100 ) print(F""" Converting model type {j}/{len(lowerCamelCase__ )}: {model_type}""" ) print("=" * 100 ) if model_type not in MODEL_CLASSES: raise ValueError(F"""Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.""" ) lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: lowercase__ : int = list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: lowercase__ : Union[str, Any] = model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(lowerCamelCase__ , lowerCamelCase__ ) , start=1 ): print("-" * 100 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(F""" Skipping finetuned checkpoint {model_shortcut_name}""" ) continue lowercase__ : Tuple = model_shortcut_name elif only_convert_finetuned_models: print(F""" Skipping not finetuned checkpoint {model_shortcut_name}""" ) continue print( F""" Converting checkpoint {i}/{len(lowerCamelCase__ )}: 
{model_shortcut_name} - model_type {model_type}""" ) print("-" * 100 ) if config_shortcut_name in aws_config_map: lowercase__ : Optional[Any] = cached_file(lowerCamelCase__ , lowerCamelCase__ , force_download=not use_cached_models ) else: lowercase__ : Union[str, Any] = config_shortcut_name if model_shortcut_name in aws_model_maps: lowercase__ : Union[str, Any] = cached_file(lowerCamelCase__ , lowerCamelCase__ , force_download=not use_cached_models ) else: lowercase__ : Any = model_shortcut_name if os.path.isfile(lowerCamelCase__ ): lowercase__ : Optional[Any] = "converted_model" convert_pt_checkpoint_to_tf( model_type=lowerCamelCase__ , pytorch_checkpoint_path=lowerCamelCase__ , config_file=lowerCamelCase__ , tf_dump_path=os.path.join(lowerCamelCase__ , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=lowerCamelCase__ , ) if remove_cached_files: os.remove(lowerCamelCase__ ) os.remove(lowerCamelCase__ ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_dump_path''', default=None, type=str, required=True, help='''Path to the output Tensorflow dump file.''' ) parser.add_argument( '''--model_type''', default=None, type=str, help=( f'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and ''' '''convert all the models from AWS.''' ), ) parser.add_argument( '''--pytorch_checkpoint_path''', default=None, type=str, help=( '''Path to the PyTorch checkpoint path or shortcut name to download from AWS. ''' '''If not given, will download and convert all the checkpoints from AWS.''' ), ) parser.add_argument( '''--config_file''', default=None, type=str, help=( '''The config json file corresponding to the pre-trained model. \n''' '''This specifies the model architecture. 
If not given and ''' '''--pytorch_checkpoint_path is not given or is a shortcut name ''' '''use the configuration associated to the shortcut name on the AWS''' ), ) parser.add_argument( '''--compare_with_pt_model''', action='''store_true''', help='''Compare Tensorflow and PyTorch model predictions.''' ) parser.add_argument( '''--use_cached_models''', action='''store_true''', help='''Use cached models if possible instead of updating to latest checkpoint versions.''', ) parser.add_argument( '''--remove_cached_files''', action='''store_true''', help='''Remove pytorch models after conversion (save memory when converting in batches).''', ) parser.add_argument('''--only_convert_finetuned_models''', action='''store_true''', help='''Only convert finetuned models.''') lowerCAmelCase__ = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
81
import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class snake_case__(_UpperCamelCase , unittest.TestCase ): """simple docstring""" lowercase_ = DiTPipeline lowercase_ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS lowercase_ = PipelineTesterMixin.required_optional_params - { """latents""", """num_images_per_prompt""", """callback""", """callback_steps""", } lowercase_ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS lowercase_ = False def snake_case ( self : int ): torch.manual_seed(0 ) lowercase__ : Optional[Any] = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=SCREAMING_SNAKE_CASE , activation_fn="gelu-approximate" , num_embeds_ada_norm=1_000 , norm_type="ada_norm_zero" , norm_elementwise_affine=SCREAMING_SNAKE_CASE , ) lowercase__ : Dict = AutoencoderKL() lowercase__ : Any = DDIMScheduler() lowercase__ : int = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler} return components def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int=0 ): if str(SCREAMING_SNAKE_CASE ).startswith("mps" ): lowercase__ : Union[str, Any] = torch.manual_seed(SCREAMING_SNAKE_CASE ) else: lowercase__ : List[str] = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE ) lowercase__ : int = { "class_labels": [1], "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs 
def snake_case ( self : Any ): lowercase__ : List[Any] = "cpu" lowercase__ : str = self.get_dummy_components() lowercase__ : str = self.pipeline_class(**SCREAMING_SNAKE_CASE ) pipe.to(SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE ) lowercase__ : Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE ) lowercase__ : str = pipe(**SCREAMING_SNAKE_CASE ).images lowercase__ : Tuple = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) lowercase__ : Tuple = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] ) lowercase__ : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(SCREAMING_SNAKE_CASE , 1E-3 ) def snake_case ( self : str ): self._test_inference_batch_single_identical(relax_max_difference=SCREAMING_SNAKE_CASE , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def snake_case ( self : Tuple ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class snake_case__(unittest.TestCase ): """simple docstring""" def snake_case ( self : int ): super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case ( self : str ): lowercase__ : List[Any] = torch.manual_seed(0 ) lowercase__ : Dict = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" ) pipe.to("cuda" ) lowercase__ : Tuple = ["vase", "umbrella", "white shark", "white wolf"] lowercase__ : Optional[Any] = pipe.get_label_ids(SCREAMING_SNAKE_CASE ) lowercase__ : Dict = pipe(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type="np" ).images for word, image in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): lowercase__ : Optional[Any] = load_numpy( f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" ) 
assert np.abs((expected_image - image).max() ) < 1E-2 def snake_case ( self : Union[str, Any] ): lowercase__ : int = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" ) lowercase__ : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("cuda" ) lowercase__ : Dict = ["vase", "umbrella"] lowercase__ : Any = pipe.get_label_ids(SCREAMING_SNAKE_CASE ) lowercase__ : List[str] = torch.manual_seed(0 ) lowercase__ : str = pipe(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type="np" ).images for word, image in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): lowercase__ : Optional[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" f"""/dit/{word}_512.npy""" ) assert np.abs((expected_image - image).max() ) < 1E-1
81
1
import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" return x + 2 class snake_case__(unittest.TestCase ): """simple docstring""" def snake_case ( self : Union[str, Any] ): lowercase__ : Union[str, Any] = "x = 3" lowercase__ : List[str] = {} lowercase__ : str = evaluate(SCREAMING_SNAKE_CASE , {} , state=SCREAMING_SNAKE_CASE ) assert result == 3 self.assertDictEqual(SCREAMING_SNAKE_CASE , {"x": 3} ) lowercase__ : Dict = "x = y" lowercase__ : int = {"y": 5} lowercase__ : List[Any] = evaluate(SCREAMING_SNAKE_CASE , {} , state=SCREAMING_SNAKE_CASE ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(SCREAMING_SNAKE_CASE , {"x": 5, "y": 5} ) def snake_case ( self : Any ): lowercase__ : List[Any] = "y = add_two(x)" lowercase__ : Any = {"x": 3} lowercase__ : List[Any] = evaluate(SCREAMING_SNAKE_CASE , {"add_two": add_two} , state=SCREAMING_SNAKE_CASE ) assert result == 5 self.assertDictEqual(SCREAMING_SNAKE_CASE , {"x": 3, "y": 5} ) # Won't work without the tool with CaptureStdout() as out: lowercase__ : Tuple = evaluate(SCREAMING_SNAKE_CASE , {} , state=SCREAMING_SNAKE_CASE ) assert result is None assert "tried to execute add_two" in out.out def snake_case ( self : Dict ): lowercase__ : int = "x = 3" lowercase__ : Dict = {} lowercase__ : Tuple = evaluate(SCREAMING_SNAKE_CASE , {} , state=SCREAMING_SNAKE_CASE ) assert result == 3 self.assertDictEqual(SCREAMING_SNAKE_CASE , {"x": 3} ) def snake_case ( self : Optional[int] ): lowercase__ : Dict = "test_dict = {'x': x, 'y': add_two(x)}" lowercase__ : int = {"x": 3} lowercase__ : str = evaluate(SCREAMING_SNAKE_CASE , {"add_two": add_two} , state=SCREAMING_SNAKE_CASE ) self.assertDictEqual(SCREAMING_SNAKE_CASE , {"x": 3, "y": 5} ) self.assertDictEqual(SCREAMING_SNAKE_CASE , {"x": 3, "test_dict": {"x": 3, "y": 5}} ) def snake_case ( self : Dict ): 
lowercase__ : Any = "x = 3\ny = 5" lowercase__ : Optional[Any] = {} lowercase__ : List[str] = evaluate(SCREAMING_SNAKE_CASE , {} , state=SCREAMING_SNAKE_CASE ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(SCREAMING_SNAKE_CASE , {"x": 3, "y": 5} ) def snake_case ( self : Dict ): lowercase__ : Union[str, Any] = "text = f'This is x: {x}.'" lowercase__ : str = {"x": 3} lowercase__ : Union[str, Any] = evaluate(SCREAMING_SNAKE_CASE , {} , state=SCREAMING_SNAKE_CASE ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(SCREAMING_SNAKE_CASE , {"x": 3, "text": "This is x: 3."} ) def snake_case ( self : str ): lowercase__ : Any = "if x <= 3:\n y = 2\nelse:\n y = 5" lowercase__ : Any = {"x": 3} lowercase__ : Any = evaluate(SCREAMING_SNAKE_CASE , {} , state=SCREAMING_SNAKE_CASE ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(SCREAMING_SNAKE_CASE , {"x": 3, "y": 2} ) lowercase__ : Any = {"x": 8} lowercase__ : Any = evaluate(SCREAMING_SNAKE_CASE , {} , state=SCREAMING_SNAKE_CASE ) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(SCREAMING_SNAKE_CASE , {"x": 8, "y": 5} ) def snake_case ( self : Any ): lowercase__ : Any = "test_list = [x, add_two(x)]" lowercase__ : Tuple = {"x": 3} lowercase__ : str = evaluate(SCREAMING_SNAKE_CASE , {"add_two": add_two} , state=SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , [3, 5] ) self.assertDictEqual(SCREAMING_SNAKE_CASE , {"x": 3, "test_list": [3, 5]} ) def snake_case ( self : Any ): lowercase__ : List[Any] = "y = x" lowercase__ : Union[str, Any] = {"x": 3} lowercase__ : List[str] = evaluate(SCREAMING_SNAKE_CASE , {} , state=SCREAMING_SNAKE_CASE ) assert result == 3 self.assertDictEqual(SCREAMING_SNAKE_CASE , {"x": 3, "y": 3} ) def snake_case ( self : str ): lowercase__ : Optional[Any] = "test_list = [x, add_two(x)]\ntest_list[1]" lowercase__ : str = {"x": 3} lowercase__ : Optional[Any] = evaluate(SCREAMING_SNAKE_CASE , {"add_two": add_two} , state=SCREAMING_SNAKE_CASE ) assert result == 5 self.assertDictEqual(SCREAMING_SNAKE_CASE , {"x": 3, "test_list": [3, 5]} ) lowercase__ : Optional[int] = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']" lowercase__ : str = {"x": 3} lowercase__ : Optional[Any] = evaluate(SCREAMING_SNAKE_CASE , {"add_two": add_two} , state=SCREAMING_SNAKE_CASE ) assert result == 5 self.assertDictEqual(SCREAMING_SNAKE_CASE , {"x": 3, "test_dict": {"x": 3, "y": 5}} ) def snake_case ( self : str ): lowercase__ : Any = "x = 0\nfor i in range(3):\n x = i" lowercase__ : Dict = {} lowercase__ : Tuple = evaluate(SCREAMING_SNAKE_CASE , {"range": range} , state=SCREAMING_SNAKE_CASE ) assert result == 2 self.assertDictEqual(SCREAMING_SNAKE_CASE , {"x": 2, "i": 2} )
81
import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class snake_case__(_UpperCamelCase ): """simple docstring""" lowercase_ = (CMStochasticIterativeScheduler,) lowercase_ = 1_0 def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Any ): lowercase__ : Any = { "num_train_timesteps": 201, "sigma_min": 0.002, "sigma_max": 80.0, } config.update(**SCREAMING_SNAKE_CASE ) return config def snake_case ( self : Optional[int] ): lowercase__ : Tuple = 10 lowercase__ : List[Any] = self.get_scheduler_config() lowercase__ : Optional[Any] = self.scheduler_classes[0](**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) lowercase__ : Any = scheduler.timesteps[0] lowercase__ : Optional[int] = scheduler.timesteps[1] lowercase__ : List[Any] = self.dummy_sample lowercase__ : Tuple = 0.1 * sample lowercase__ : Tuple = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample lowercase__ : Any = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def snake_case ( self : Dict ): for timesteps in [10, 50, 100, 1_000]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE ) def snake_case ( self : str ): for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=SCREAMING_SNAKE_CASE ) def snake_case ( self : str ): lowercase__ : Any = self.scheduler_classes[0] lowercase__ : List[Any] = self.get_scheduler_config() lowercase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE ) lowercase__ : Any = 1 scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) lowercase__ : List[Any] = scheduler.timesteps lowercase__ : Optional[int] = torch.manual_seed(0 ) lowercase__ : List[str] = self.dummy_model() lowercase__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(SCREAMING_SNAKE_CASE ): # 1. 
scale model input lowercase__ : Tuple = scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # 2. predict noise residual lowercase__ : Dict = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # 3. predict previous sample x_t-1 lowercase__ : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample lowercase__ : Dict = pred_prev_sample lowercase__ : List[Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) ) lowercase__ : Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 192.7_614 ) < 1E-2 assert abs(result_mean.item() - 0.2_510 ) < 1E-3 def snake_case ( self : Union[str, Any] ): lowercase__ : Optional[int] = self.scheduler_classes[0] lowercase__ : Tuple = self.get_scheduler_config() lowercase__ : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = [106, 0] scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = scheduler.timesteps lowercase__ : Optional[int] = torch.manual_seed(0 ) lowercase__ : Optional[int] = self.dummy_model() lowercase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input lowercase__ : Optional[Any] = scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # 2. predict noise residual lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # 3. 
predict previous sample x_t-1 lowercase__ : Tuple = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample lowercase__ : Union[str, Any] = pred_prev_sample lowercase__ : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) ) lowercase__ : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 347.6_357 ) < 1E-2 assert abs(result_mean.item() - 0.4_527 ) < 1E-3 def snake_case ( self : Optional[int] ): lowercase__ : Union[str, Any] = self.scheduler_classes[0] lowercase__ : str = self.get_scheduler_config() lowercase__ : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE ) lowercase__ : int = [39, 30, 12, 15, 0] with self.assertRaises(SCREAMING_SNAKE_CASE , msg="`timesteps` must be in descending order." ): scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE ) def snake_case ( self : Union[str, Any] ): lowercase__ : List[str] = self.scheduler_classes[0] lowercase__ : Dict = self.get_scheduler_config() lowercase__ : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE ) lowercase__ : Union[str, Any] = [39, 30, 12, 1, 0] lowercase__ : Tuple = len(SCREAMING_SNAKE_CASE ) with self.assertRaises(SCREAMING_SNAKE_CASE , msg="Can only pass one of `num_inference_steps` or `timesteps`." ): scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE ) def snake_case ( self : Optional[Any] ): lowercase__ : List[str] = self.scheduler_classes[0] lowercase__ : List[Any] = self.get_scheduler_config() lowercase__ : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE ) lowercase__ : Tuple = [scheduler.config.num_train_timesteps] with self.assertRaises( SCREAMING_SNAKE_CASE , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ): scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE )
81
1
import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.testing_utils import require_tensorflow_text, require_tf, slow if is_tf_available(): import tensorflow as tf if is_tensorflow_text_available(): from transformers.models.bert import TFBertTokenizer lowerCAmelCase__ = ['''bert-base-uncased''', '''bert-base-cased'''] lowerCAmelCase__ = '''hf-internal-testing/tiny-bert-tf-only''' if is_tf_available(): class snake_case__(tf.keras.Model ): """simple docstring""" def __init__( self : str , SCREAMING_SNAKE_CASE : str ): super().__init__() lowercase__ : Dict = tokenizer lowercase__ : Union[str, Any] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = TFAutoModel.from_config(SCREAMING_SNAKE_CASE ) def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : Tuple ): lowercase__ : Any = self.tokenizer(SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = self.bert(**SCREAMING_SNAKE_CASE ) return out["pooler_output"] @require_tf @require_tensorflow_text class snake_case__(unittest.TestCase ): """simple docstring""" def snake_case ( self : List[str] ): super().setUp() lowercase__ : List[str] = [ BertTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2) ] # repeat for when fast_bert_tokenizer=false lowercase__ : Any = [TFBertTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) for checkpoint in TOKENIZER_CHECKPOINTS] + [ TFBertTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , use_fast_bert_tokenizer=SCREAMING_SNAKE_CASE ) for checkpoint in TOKENIZER_CHECKPOINTS ] assert len(self.tokenizers ) == len(self.tf_tokenizers ) lowercase__ : Union[str, Any] = [ "This is a straightforward English test sentence.", "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.", "Now we're going to add some 
Chinese: 一 二 三 一二三", "And some much more rare Chinese: 齉 堃 齉堃", "Je vais aussi écrire en français pour tester les accents", "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ", ] lowercase__ : str = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def snake_case ( self : Tuple ): for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in (self.test_sentences, self.paired_sentences): lowercase__ : Any = tokenizer(SCREAMING_SNAKE_CASE , return_tensors="tf" , padding="longest" ) lowercase__ : List[Any] = tf_tokenizer(SCREAMING_SNAKE_CASE ) for key in python_outputs.keys(): self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) ) self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) ) @slow def snake_case ( self : Any ): for tf_tokenizer in self.tf_tokenizers: lowercase__ : List[Any] = tf_tokenizer(self.paired_sentences ) lowercase__ : int = tf_tokenizer( text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , ) for key in merged_outputs.keys(): self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) ) @slow def snake_case ( self : List[str] ): for tf_tokenizer in self.tf_tokenizers: lowercase__ : Any = tf.function(SCREAMING_SNAKE_CASE ) for test_inputs in (self.test_sentences, self.paired_sentences): lowercase__ : Any = tf.constant(SCREAMING_SNAKE_CASE ) lowercase__ : Union[str, Any] = compiled_tokenizer(SCREAMING_SNAKE_CASE ) lowercase__ : str = tf_tokenizer(SCREAMING_SNAKE_CASE ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def snake_case ( self : Tuple ): for tf_tokenizer in self.tf_tokenizers: lowercase__ : Tuple = ModelToSave(tokenizer=SCREAMING_SNAKE_CASE ) lowercase__ : Optional[Any] = tf.convert_to_tensor(self.test_sentences ) lowercase__ : 
List[Any] = model(SCREAMING_SNAKE_CASE ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: lowercase__ : str = Path(SCREAMING_SNAKE_CASE ) / "saved.model" model.save(SCREAMING_SNAKE_CASE ) lowercase__ : Any = tf.keras.models.load_model(SCREAMING_SNAKE_CASE ) lowercase__ : Any = loaded_model(SCREAMING_SNAKE_CASE ) # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
81
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import ( CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common, get_velocity_common, ) @flax.struct.dataclass class snake_case__: """simple docstring""" lowercase_ = 42 # setable values lowercase_ = 42 lowercase_ = 42 lowercase_ = None @classmethod def snake_case ( cls : Union[str, Any] , SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ): return cls(common=SCREAMING_SNAKE_CASE , init_noise_sigma=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE ) @dataclass class snake_case__(_UpperCamelCase ): """simple docstring""" lowercase_ = 42 class snake_case__(_UpperCamelCase , _UpperCamelCase ): """simple docstring""" lowercase_ = [e.name for e in FlaxKarrasDiffusionSchedulers] lowercase_ = 42 @property def snake_case ( self : Dict ): return True @register_to_config def __init__( self : Dict , SCREAMING_SNAKE_CASE : int = 1_000 , SCREAMING_SNAKE_CASE : float = 0.0_001 , SCREAMING_SNAKE_CASE : float = 0.02 , SCREAMING_SNAKE_CASE : str = "linear" , SCREAMING_SNAKE_CASE : Optional[jnp.ndarray] = None , SCREAMING_SNAKE_CASE : str = "fixed_small" , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : str = "epsilon" , SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa , ): lowercase__ : List[Any] = dtype def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : Optional[CommonSchedulerState] = None ): if common is None: lowercase__ : Dict = CommonSchedulerState.create(self ) # standard deviation of the initial noise distribution lowercase__ : Dict = jnp.array(1.0 , dtype=self.dtype ) lowercase__ : Dict = jnp.arange(0 , self.config.num_train_timesteps 
).round()[::-1] return DDPMSchedulerState.create( common=SCREAMING_SNAKE_CASE , init_noise_sigma=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE , ) def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Optional[int] = None ): return sample def snake_case ( self : int , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple = () ): lowercase__ : Any = self.config.num_train_timesteps // num_inference_steps # creates integer timesteps by multiplying by ratio # rounding to avoid issues when num_inference_step is power of 3 lowercase__ : Union[str, Any] = (jnp.arange(0 , SCREAMING_SNAKE_CASE ) * step_ratio).round()[::-1] return state.replace( num_inference_steps=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE , ) def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : List[Any]=None ): lowercase__ : Tuple = state.common.alphas_cumprod[t] lowercase__ : Any = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample lowercase__ : str = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t] if variance_type is None: lowercase__ : Dict = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small": lowercase__ : Union[str, Any] = jnp.clip(SCREAMING_SNAKE_CASE , a_min=1E-2_0 ) # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": lowercase__ : Optional[int] = jnp.log(jnp.clip(SCREAMING_SNAKE_CASE , a_min=1E-2_0 ) ) elif variance_type == "fixed_large": 
lowercase__ : Union[str, Any] = state.common.betas[t] elif variance_type == "fixed_large_log": # Glide max_log lowercase__ : List[Any] = jnp.log(state.common.betas[t] ) elif variance_type == "learned": return predicted_variance elif variance_type == "learned_range": lowercase__ : List[Any] = variance lowercase__ : Union[str, Any] = state.common.betas[t] lowercase__ : Tuple = (predicted_variance + 1) / 2 lowercase__ : Optional[Any] = frac * max_log + (1 - frac) * min_log return variance def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Optional[jax.random.KeyArray] = None , SCREAMING_SNAKE_CASE : bool = True , ): lowercase__ : Tuple = timestep if key is None: lowercase__ : Union[str, Any] = jax.random.PRNGKey(0 ) if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]: lowercase__ , lowercase__ : str = jnp.split(SCREAMING_SNAKE_CASE , sample.shape[1] , axis=1 ) else: lowercase__ : Any = None # 1. compute alphas, betas lowercase__ : Dict = state.common.alphas_cumprod[t] lowercase__ : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) lowercase__ : Optional[Any] = 1 - alpha_prod_t lowercase__ : Optional[int] = 1 - alpha_prod_t_prev # 2. 
compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": lowercase__ : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": lowercase__ : Optional[Any] = model_output elif self.config.prediction_type == "v_prediction": lowercase__ : Optional[Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output else: raise ValueError( f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """ " for the FlaxDDPMScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: lowercase__ : List[Any] = jnp.clip(SCREAMING_SNAKE_CASE , -1 , 1 ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf lowercase__ : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t lowercase__ : str = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf lowercase__ : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise def random_variance(): lowercase__ : Any = jax.random.split(SCREAMING_SNAKE_CASE , num=1 ) lowercase__ : Any = jax.random.normal(SCREAMING_SNAKE_CASE , shape=model_output.shape , dtype=self.dtype ) return (self._get_variance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , predicted_variance=SCREAMING_SNAKE_CASE ) ** 0.5) * noise lowercase__ : Optional[Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) ) lowercase__ : Optional[int] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample, state) return FlaxDDPMSchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE , state=SCREAMING_SNAKE_CASE ) def snake_case ( self : Any , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , ): return add_noise_common(state.common , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , ): return get_velocity_common(state.common , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __len__( self : Tuple ): return self.config.num_train_timesteps
81
1
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union

import numpy as np


def ffmpeg_read(bpayload, sampling_rate):
    """Decode an audio byte payload into a mono float32 waveform using ffmpeg.

    Args:
        bpayload: raw encoded audio bytes (any container/codec ffmpeg understands).
        sampling_rate: target sampling rate for the decoded waveform.

    Returns:
        np.ndarray of float32 PCM samples.

    Raises:
        ValueError: if ffmpeg is not installed or the payload decodes to nothing.
    """
    ar = f"""{sampling_rate}"""
    ac = "1"  # force mono output
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        # Feed the payload on stdin and read the decoded PCM from stdout.
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio


def ffmpeg_microphone(sampling_rate, chunk_length_s, format_for_conversion="f32le"):
    """Yield raw audio byte chunks captured from the default microphone via ffmpeg.

    Args:
        sampling_rate: capture sampling rate.
        chunk_length_s: duration of each yielded chunk, in seconds.
        format_for_conversion: PCM sample format, "s16le" (2 bytes/sample) or
            "f32le" (4 bytes/sample).

    Raises:
        ValueError: for an unhandled sample format.
    """
    ar = f"""{sampling_rate}"""
    ac = "1"  # mono capture
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""")

    # Pick the platform-specific ffmpeg capture backend and device name.
    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",  # low-latency capture: don't let ffmpeg buffer input
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item


def ffmpeg_microphone_live(
    sampling_rate,
    chunk_length_s,
    stream_chunk_s=None,
    stride_length_s=None,
    format_for_conversion="f32le",
):
    """Stream overlapping numpy audio chunks from the microphone.

    Wraps :func:`ffmpeg_microphone` with :func:`chunk_bytes_iter` so consumers
    receive dicts with a decoded ``"raw"`` numpy array, a ``"stride"`` expressed
    in samples, and the ``"sampling_rate"``. Chunks that arrive more than
    ``10 * delta`` behind wall-clock time are dropped to keep the stream live.

    Args:
        sampling_rate: capture sampling rate.
        chunk_length_s: logical chunk duration used for striding, in seconds.
        stream_chunk_s: optional smaller duration for each streamed read;
            defaults to ``chunk_length_s``.
        stride_length_s: overlap duration(s); a scalar is used for both sides.
            Defaults to ``chunk_length_s / 6``.
        format_for_conversion: "s16le" or "f32le".

    Raises:
        ValueError: for an unhandled sample format.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item


def chunk_bytes_iter(iterator, chunk_len, stride, stream=False):
    """Re-chunk an iterator of byte strings into overlapping fixed-size chunks.

    Args:
        iterator: iterable of ``bytes``.
        chunk_len: size of each emitted chunk, in bytes.
        stride: ``(stride_left, stride_right)`` overlap, in bytes; their sum
            must be strictly smaller than ``chunk_len``.
        stream: when True, also emit partial chunks as data trickles in, with
            a ``"partial"`` flag on every item.

    Yields:
        dicts with ``"raw"`` (bytes), ``"stride"`` (effective left/right
        overlap of that chunk) and, when ``stream`` is True, ``"partial"``.

    Raises:
        ValueError: if the stride does not leave room for fresh data.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"""
        )
    _stride_left = 0  # the very first chunk has no left context yet
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            # Not enough data for a full chunk yet: surface what we have.
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                # Keep the overlap region for the next chunk's left context.
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item


def _ffmpeg_stream(ffmpeg_command, buflen):
    """Run an ffmpeg command and yield its stdout in ``buflen``-byte reads.

    Raises:
        ValueError: if the ffmpeg binary is missing.
    """
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
from typing import Callable, List, Optional, Union import PIL import torch from transformers import ( CLIPImageProcessor, CLIPSegForImageSegmentation, CLIPSegProcessor, CLIPTextModel, CLIPTokenizer, ) from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import deprecate, is_accelerate_available, logging lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name class snake_case__(_UpperCamelCase ): """simple docstring""" def __init__( self : Any , SCREAMING_SNAKE_CASE : CLIPSegForImageSegmentation , SCREAMING_SNAKE_CASE : CLIPSegProcessor , SCREAMING_SNAKE_CASE : AutoencoderKL , SCREAMING_SNAKE_CASE : CLIPTextModel , SCREAMING_SNAKE_CASE : CLIPTokenizer , SCREAMING_SNAKE_CASE : UNetaDConditionModel , SCREAMING_SNAKE_CASE : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , SCREAMING_SNAKE_CASE : StableDiffusionSafetyChecker , SCREAMING_SNAKE_CASE : CLIPImageProcessor , ): super().__init__() if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1: lowercase__ : Optional[Any] = ( f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`""" f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """ "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1" , "1.0.0" , SCREAMING_SNAKE_CASE , standard_warn=SCREAMING_SNAKE_CASE ) lowercase__ : int = dict(scheduler.config ) lowercase__ : Any = 1 lowercase__ : Union[str, Any] = FrozenDict(SCREAMING_SNAKE_CASE ) if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False: lowercase__ : Optional[Any] = ( f"""The configuration file of this scheduler: {scheduler} has not set the configuration""" " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make" " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to" " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face" " Hub, it would be very nice if you could open a Pull request for the" " `scheduler/scheduler_config.json` file" ) deprecate("skip_prk_steps not set" , "1.0.0" , SCREAMING_SNAKE_CASE , standard_warn=SCREAMING_SNAKE_CASE ) lowercase__ : Tuple = dict(scheduler.config ) lowercase__ : Union[str, Any] = True lowercase__ : int = FrozenDict(SCREAMING_SNAKE_CASE ) if safety_checker is None: logger.warning( f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure""" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
) self.register_modules( segmentation_model=SCREAMING_SNAKE_CASE , segmentation_processor=SCREAMING_SNAKE_CASE , vae=SCREAMING_SNAKE_CASE , text_encoder=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE , safety_checker=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE , ) def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory lowercase__ : List[str] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(SCREAMING_SNAKE_CASE ) def snake_case ( self : List[Any] ): self.enable_attention_slicing(SCREAMING_SNAKE_CASE ) def snake_case ( self : Optional[Any] ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) lowercase__ : Union[str, Any] = torch.device("cuda" ) for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]: if cpu_offloaded_model is not None: cpu_offload(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def snake_case ( self : Optional[Any] ): if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(SCREAMING_SNAKE_CASE , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, List[str]] , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, PIL.Image.Image] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int = 512 , SCREAMING_SNAKE_CASE : int = 512 , 
SCREAMING_SNAKE_CASE : int = 50 , SCREAMING_SNAKE_CASE : float = 7.5 , SCREAMING_SNAKE_CASE : Optional[Union[str, List[str]]] = None , SCREAMING_SNAKE_CASE : Optional[int] = 1 , SCREAMING_SNAKE_CASE : float = 0.0 , SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None , SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE : Optional[str] = "pil" , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , SCREAMING_SNAKE_CASE : int = 1 , **SCREAMING_SNAKE_CASE : Optional[Any] , ): lowercase__ : Dict = self.segmentation_processor( text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device ) lowercase__ : int = self.segmentation_model(**SCREAMING_SNAKE_CASE ) lowercase__ : int = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy() lowercase__ : List[str] = self.numpy_to_pil(SCREAMING_SNAKE_CASE )[0].resize(image.size ) # Run inpainting pipeline with the generated mask lowercase__ : int = StableDiffusionInpaintPipeline( vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , ) return inpainting_pipeline( prompt=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , mask_image=SCREAMING_SNAKE_CASE , height=SCREAMING_SNAKE_CASE , width=SCREAMING_SNAKE_CASE , num_inference_steps=SCREAMING_SNAKE_CASE , guidance_scale=SCREAMING_SNAKE_CASE , negative_prompt=SCREAMING_SNAKE_CASE , num_images_per_prompt=SCREAMING_SNAKE_CASE , eta=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , latents=SCREAMING_SNAKE_CASE , output_type=SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE , callback=SCREAMING_SNAKE_CASE , callback_steps=SCREAMING_SNAKE_CASE , )
81
1
def __lowerCamelCase ( lowerCamelCase__ ):
    """Return the mean absolute deviation of the numbers in *lowerCamelCase__*.

    Raises:
        ValueError: if the input sequence is empty (the mean is undefined).
    """
    if not lowerCamelCase__:  # Makes sure that the list is not empty
        raise ValueError("List is empty" )
    # Calculate the average; the original body assigned the mean to one name
    # but read it back as `average` (and iterated `nums`), raising NameError.
    average = sum(lowerCamelCase__ ) / len(lowerCamelCase__ )
    # Mean of the absolute deviations from that average.
    return sum(abs(x - average ) for x in lowerCamelCase__ ) / len(lowerCamelCase__ )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
81
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" lowercase__ : Dict = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2] lowercase__ : str = True if "large" in model_name or "huge" in model_name else False lowercase__ : Optional[Any] = True if "large" in model_name or "huge" in model_name else False lowercase__ : List[str] = True if "large" in model_name or "huge" in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: lowercase__ : int = [3, 3, 3, 3] lowercase__ : Tuple = [5, 5, 5, 5] elif "fl4" in model_name: lowercase__ : Optional[Any] = [4, 4, 4, 4] lowercase__ : Optional[Any] = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: lowercase__ : Union[str, Any] = [3, 3, 3, 3] if "lrf" in model_name: lowercase__ : Union[str, Any] = [3, 3, 3, 3] else: lowercase__ : Tuple = [2, 2, 2, 2] if "tiny" in model_name: lowercase__ : Optional[Any] = 96 elif "small" in model_name: lowercase__ : List[str] = 96 elif "base" in model_name: lowercase__ : str = 128 elif "large" in model_name: lowercase__ : Any = 192 elif "xlarge" in model_name: lowercase__ : str = 256 elif "huge" in model_name: lowercase__ : List[str] = 352 # set label information lowercase__ : Tuple = "huggingface/label-files" if "large" in model_name or "huge" in model_name: lowercase__ : List[Any] = "imagenet-22k-id2label.json" else: lowercase__ : Optional[int] = "imagenet-1k-id2label.json" lowercase__ : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) ) lowercase__ : 
Optional[int] = {int(lowerCamelCase__ ): v for k, v in idalabel.items()} lowercase__ : int = {v: k for k, v in idalabel.items()} lowercase__ : str = FocalNetConfig( embed_dim=lowerCamelCase__ , depths=lowerCamelCase__ , focal_levels=lowerCamelCase__ , focal_windows=lowerCamelCase__ , use_conv_embed=lowerCamelCase__ , idalabel=lowerCamelCase__ , labelaid=lowerCamelCase__ , use_post_layernorm=lowerCamelCase__ , use_layerscale=lowerCamelCase__ , ) return config def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" if "patch_embed.proj" in name: lowercase__ : int = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: lowercase__ : Dict = name.replace("patch_embed.norm" , "embeddings.norm" ) if "layers" in name: lowercase__ : List[str] = "encoder." + name if "encoder.layers" in name: lowercase__ : Optional[Any] = name.replace("encoder.layers" , "encoder.stages" ) if "downsample.proj" in name: lowercase__ : Optional[Any] = name.replace("downsample.proj" , "downsample.projection" ) if "blocks" in name: lowercase__ : List[str] = name.replace("blocks" , "layers" ) if "modulation.f.weight" in name or "modulation.f.bias" in name: lowercase__ : Any = name.replace("modulation.f" , "modulation.projection_in" ) if "modulation.h.weight" in name or "modulation.h.bias" in name: lowercase__ : Optional[Any] = name.replace("modulation.h" , "modulation.projection_context" ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: lowercase__ : Optional[Any] = name.replace("modulation.proj" , "modulation.projection_out" ) if name == "norm.weight": lowercase__ : List[str] = "layernorm.weight" if name == "norm.bias": lowercase__ : List[Any] = "layernorm.bias" if "head" in name: lowercase__ : Optional[int] = name.replace("head" , "classifier" ) else: lowercase__ : Union[str, Any] = "focalnet." 
+ name return name def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ): """simple docstring""" lowercase__ : List[Any] = { "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth", "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth", "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth", "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth", "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth", "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth", "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth", "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth", "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth", "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth", } # fmt: on lowercase__ : Union[str, Any] = model_name_to_url[model_name] print("Checkpoint URL: " , lowerCamelCase__ ) lowercase__ : Optional[int] = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="cpu" )["model"] # rename keys for key in state_dict.copy().keys(): lowercase__ : Tuple = state_dict.pop(lowerCamelCase__ ) lowercase__ : List[str] = val lowercase__ : List[str] = get_focalnet_config(lowerCamelCase__ ) lowercase__ : Union[str, Any] = FocalNetForImageClassification(lowerCamelCase__ ) model.eval() # load state dict model.load_state_dict(lowerCamelCase__ ) # verify 
conversion lowercase__ : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" lowercase__ : int = BitImageProcessor( do_resize=lowerCamelCase__ , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase__ , crop_size=224 , do_normalize=lowerCamelCase__ , image_mean=lowerCamelCase__ , image_std=lowerCamelCase__ , ) lowercase__ : Tuple = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ) lowercase__ : Tuple = processor(images=lowerCamelCase__ , return_tensors="pt" ) lowercase__ : Any = transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) lowercase__ : int = image_transforms(lowerCamelCase__ ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , lowerCamelCase__ , atol=1e-4 ) lowercase__ : List[Any] = model(**lowerCamelCase__ ) lowercase__ : int = outputs.logits.argmax(-1 ).item() print("Predicted class:" , model.config.idalabel[predicted_class_idx] ) print("First values of logits:" , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": lowercase__ : Union[str, Any] = torch.tensor([0.2166, -0.4368, 0.2191] ) elif model_name == "focalnet-tiny-lrf": lowercase__ : Optional[int] = torch.tensor([1.1669, 0.0125, -0.1695] ) elif model_name == "focalnet-small": lowercase__ : int = torch.tensor([0.4917, -0.0430, 0.1341] ) elif model_name == "focalnet-small-lrf": lowercase__ : Tuple = torch.tensor([-0.2588, -0.5342, -0.2331] ) elif model_name == "focalnet-base": lowercase__ : str = torch.tensor([-0.1655, -0.4090, -0.1730] ) elif model_name == "focalnet-base-lrf": lowercase__ : Optional[Any] = torch.tensor([0.5306, -0.0483, -0.3928] ) assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) print("Looks ok!" 
) if pytorch_dump_folder_path is not None: print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(lowerCamelCase__ ) processor.save_pretrained(lowerCamelCase__ ) if push_to_hub: print(F"""Pushing model and processor of {model_name} to the hub...""" ) model.push_to_hub(F"""{model_name}""" ) processor.push_to_hub(F"""{model_name}""" ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''focalnet-tiny''', type=str, help='''Name of the FocalNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub.''', ) lowerCAmelCase__ = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
81
1
import argparse
import os
import sys
from unittest.mock import patch

import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json

# Small Marian student checkpoint used instead of mbart-large to keep the run feasible.
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"


class TestMbartCc25Enro(TestCasePlus):
    """End-to-end (slow, GPU) test of the `finetune.py` training script on WMT en-ro.

    The original obfuscated version defined every method under the same name and
    assigned results to throwaway identifiers; names are restored here from the
    reads in the method bodies so the module actually runs.
    """

    def setUp(self):
        super().setUp()
        # Download + extract a small en-ro translation dataset once per test class.
        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"

    @slow
    @require_torch_gpu
    def test_model_download(self):
        """Warm the HF cache so the training test below does not pay download time."""
        MarianMTModel.from_pretrained(MARIAN_MODEL)

    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        """Run the shipped bash recipe (rewritten for a 1-epoch smoke run) through finetune.main."""
        # Substitutions that shrink the recipe to a single fast epoch on the tiny dataset.
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }

        # Clean up bash script: keep only the args after `finetune.py`, drop line
        # continuations and the trailing "$@" passthrough.
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"""
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        """.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
        self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)

        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)
        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"], 17)
        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1


class TestDistilMarianNoTeacher(TestCasePlus):
    """End-to-end (slow, GPU) test of `distillation.py` without a teacher model."""

    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script: keep only the args after `distillation.py`.
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        # +1 accounts for val_sanity_check
        assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)

        assert last_step_stats["val_avg_gen_time"] >= 0.01

        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
81
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    """Configuration class for an Informer time-series forecasting model.

    Restored from a machine-mangled source where every `__init__` parameter was
    renamed to one duplicate identifier (a SyntaxError) and every `self.X = ...`
    assignment target was discarded, so the config stored nothing. Parameter
    names are recovered from the reads in the body; attribute names from the
    `self.`-reads (`self.cardinality`, `self.lags_sequence`, ...).
    """

    # Read by the HF config machinery; the two obfuscated `lowercase_` attrs
    # shadowed each other, losing `model_type`.
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        # Default the conditioning window to the prediction window.
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension (falls back to a per-category heuristic capped at 50)
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Number of extra input features concatenated to the lagged values."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
81
1
def _print_dist(dist, v):
    """Pretty-print the v-by-v shortest-path matrix; unreachable pairs print as INF."""
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """All-pairs shortest paths via Floyd–Warshall.

    :param graph: v-by-v adjacency matrix of edge weights; float("inf") marks
        a missing edge. The input matrix is not modified.
    :param v: number of vertices.
    :return: tuple (dist, v) where dist is the shortest-path matrix.

    Fix vs. the obfuscated original: both helpers were defined under the same
    mangled name `__lowerCamelCase`, so the calls to `_print_dist` (here) and
    `floyd_warshall` (in `__main__`) raised NameError. The real names used by
    the call sites are restored.
    """
    # Work on a copy so the caller's graph stays intact.
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        # Fix: the mangled original discarded this assignment; a vertex is at
        # distance 0 from itself.
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)

    # Example Input
    # Enter number of vertices: 3
    # Enter number of edges: 2

    # # generated graph from vertex and edge inputs
    # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
    # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]

    # specify source, destination and weight for edge #1
    # Edge 1
    # Enter source:1
    # Enter destination:2
    # Enter weight:2

    # specify source, destination and weight for edge #2
    # Edge 2
    # Enter source:2
    # Enter destination:1
    # Enter weight:1

    # # Expected Output from the vertice, edge and src, dst, weight inputs!!
    # 0	INF	INF
    # INF	0	2
    # INF	1	0
81
import argparse

from torch import nn

# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
    ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
    XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)

from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging


logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Port weights from the old ProphetNet model structure into the current one.

    For every key the new model reports as missing, walk the dotted attribute
    path in parallel on the new and old model (translating attribute names via
    `mapping`) and copy the old tensor across.

    Fixes vs. the obfuscated original: the function/parameter/local names were
    mangled (duplicate parameter names are a SyntaxError; `__main__` called the
    undefined `convert_prophetnet_checkpoint_to_pytorch`), and the two in-proj
    shape checks were bare tuple expressions with no effect — they are restored
    as real asserts.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    # These attentions keep q/k/v fused in one in_proj tensor in the old model.
    special_keys = ["key_proj", "value_proj", "query_proj"]

    # new attribute name -> old attribute name ("" means: same submodule, no hop).
    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                # Fall back to the new name when the old model lacks the mapped one.
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # Old model fuses q/k/v: split the in_proj tensors in thirds.
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                # Restored asserts: these were no-op bare expressions before.
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            # Not a leaf yet: descend one level on both models.
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    # NOTE(review): `old_attribute` may be unbound here if the
                    # attribute was neither in `mapping` nor on old_model —
                    # latent upstream quirk, preserved.
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
81
1
import unittest

from knapsack import knapsack as k


class TestKnapsack(unittest.TestCase):
    """Unit tests for the 0/1 knapsack solver `knapsack.knapsack(cap, w, val, n)`.

    The obfuscated original defined all three tests under one duplicate method
    name (so unittest discovered none of them) and referenced undefined locals;
    real method/variable names are restored here.
    """

    def test_base_case(self):
        """Zero capacity, or a single zero-weight/zero-value item, yields 0."""
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        """Best value is items 2+3 (weights 2+1 fit in capacity 3)."""
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        """Classic example: pick the 100 and 120 items for total 220."""
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
81
import json
import os
import unittest

from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for GPT-2 (slow and fast implementations).

    Restored from a machine-mangled source: the imports referenced the
    nonexistent `GPTaTokenizer`, the five mixin-configuration class attributes
    all shared one name (shadowing each other), and every method was named
    `snake_case` (undiscoverable by unittest).
    """

    # Configuration consumed by TokenizerTesterMixin.
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        # Write a tiny vocab/merges pair so tokenizers can be built from disk.
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Without a pad token, padding="max_length" must raise.
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    def test_padding_different_model_input_name(self):
        # Intentionally disabled for GPT-2.
        pass

    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0,
                    sequence_1,
                    add_special_tokens=True,
                    return_special_tokens_mask=True,
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                # Stripping masked positions must recover the plain encoding.
                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)


@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    """Regression tests for OPT's fast tokenizer serialization."""

    def test_serialize_deserialize_fast_opt(self):
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"
        ids = tokenizer.encode(
            text,
        )
        self.assertEqual(ids, [2, 250, 1_345, 9, 10, 4_758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        ids = tokenizer.encode(
            text,
        )
        self.assertEqual(ids, [2, 250, 1_345, 9, 10, 4_758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"
        ids = tokenizer.encode(
            text,
        )
        # Same as above
        self.assertEqual(ids, [2, 250, 1_345, 9, 10, 4_758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]
        text = "A photo of a cat"
        ids = tokenizer.encode(
            text,
        )
        # We changed the bos token
        self.assertEqual(ids, [31_957, 250, 1_345, 9, 10, 4_758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        ids = tokenizer.encode(
            text,
        )
        self.assertEqual(ids, [31_957, 250, 1_345, 9, 10, 4_758])
81
1
import requests

# Giphy API key used by default; replace with a real key before running.
giphy_api_key = "YOUR API KEY"
lowerCAmelCase__ = giphy_api_key  # backward-compat alias for the previous (mangled) constant name


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Search Giphy and return the list of GIF URLs matching *query*.

    The previous revision was unrunnable: both parameters were named
    ``lowerCamelCase__`` (a SyntaxError) and the body read names
    (``query``, ``formatted_query``, ``api_key``, ``gifs``) that were never
    bound. The names below are the ones the original reads demand.

    Args:
        query: free-text search terms, whitespace separated.
        api_key: Giphy API key; defaults to the module-level ``giphy_api_key``.

    Returns:
        A list of URL strings, one per GIF in the response's ``"data"`` field.
    """
    # Giphy expects '+'-joined search terms in the query string.
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    # NOTE(review): no timeout or HTTP error handling — requests.get can hang
    # or raise on bad JSON; acceptable for a demo script, flag for library use.
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


__lowerCamelCase = get_gifs  # backward-compat alias for the previous (mangled) function name


if __name__ == "__main__":
    print("\n".join(get_gifs("space ship")))
81
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map of submodule name -> public names it exports.  This dict is consumed by
# _LazyModule below; the previous revision assigned it to `lowerCAmelCase__`
# (and then *overwrote* it with the model list) while `_LazyModule` read the
# never-defined `_import_structure`, which would raise NameError on import.
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

# The modeling submodule is only registered when PyTorch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
81
1
from typing import List, Optional, Union

import numpy as np
import tensorflow as tf

from .utils import logging


logger = logging.get_logger(__name__)
lowerCAmelCase__ = logger  # backward-compat alias for the previous (mangled) name

# NOTE(review): the previous revision named all nine functions
# `__lowerCamelCase` (each definition shadowing the last), gave every
# function duplicate `lowerCamelCase__` parameters (a SyntaxError), and
# bound every local to `lowercase__` while later statements read the
# descriptive names.  The names restored below are exactly the ones the
# surviving reads demand (e.g. `functional_layernorm` calls `shape_list`).


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Return the shape of `tensor`, preferring statically-known dimensions.

    Statically known dims come back as Python ints; unknown dims fall back to
    the matching entry of the dynamic `tf.shape` tensor.
    """
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    # Fully-unknown static shape: nothing better than the dynamic shape.
    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    """Softmax with a tiny additive epsilon on the logits.

    The 1e-9 shift is numerically negligible but avoids pathological outputs
    when an entire row of logits is at the dtype minimum (fully masked).
    """
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    """Apply layer normalization over a single `axis` with 1-D weight/bias.

    Raises:
        NotImplementedError: if `weight`/`bias` are not rank-1 or `axis` is not an int.
    """
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    """Replicate torch.flatten in TF: collapse dims start_dim..end_dim into one.

    (`input` shadows the builtin, kept for parity with torch's argument name.)
    """
    # Negative indices count from the end, as in torch.
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    """Turn a (batch, seq) or (batch, from, to) keep-mask into an additive bias.

    Kept positions map to 0, masked positions to `dtype.min`, broadcastable to
    (batch, num_heads, from, to).
    """
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    """Graph-compatible assertion that every id in `tensor` is < `embed_dim`."""
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    """Save `data` as attribute `name` on `group`, chunking around HDF5 limits.

    HDF5 caps an object's total header size at 64 KB, so oversized attribute
    arrays are split across numbered attributes ``name0``, ``name1``, ...

    Raises:
        RuntimeError: if a single element is itself larger than the limit.
    """
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    """Load attribute `name` from `group`, reassembling chunks written by
    `save_attributes_to_hdf5_group` and decoding bytes entries to str."""
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    """Expand every rank-1 tf.Tensor in a nested structure to rank 2 by adding
    a trailing axis; non-tensor leaves and higher-rank tensors pass through."""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)


# Backward-compat alias: the previous (mangled) revision bound every function
# above to `__lowerCamelCase`, leaving that name pointing at the last one.
__lowerCamelCase = expand_1d
81
from __future__ import annotations

import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module

import numpy as np

from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFViTMAEForPreTraining, TFViTMAEModel

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


# NOTE(review): the identifiers in this module look machine-mangled — every
# parameter is named SCREAMING_SNAKE_CASE (duplicate parameter names are a
# SyntaxError) and every local is rebound to `lowercase__` while subsequent
# statements read the original descriptive names (NameError at runtime).
# Comments below describe the apparent intent; the tokens are left untouched.


# Model tester: builds a small ViTMAE config plus dummy inputs for the tests.
class snake_case__:
    """simple docstring"""

    def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int=13 , SCREAMING_SNAKE_CASE : Union[str, Any]=30 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Optional[Any]=3 , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Optional[Any]=True , SCREAMING_SNAKE_CASE : List[str]=32 , SCREAMING_SNAKE_CASE : Optional[int]=2 , SCREAMING_SNAKE_CASE : str=4 , SCREAMING_SNAKE_CASE : List[Any]=37 , SCREAMING_SNAKE_CASE : Tuple="gelu" , SCREAMING_SNAKE_CASE : List[str]=0.1 , SCREAMING_SNAKE_CASE : List[Any]=0.1 , SCREAMING_SNAKE_CASE : int=10 , SCREAMING_SNAKE_CASE : List[str]=0.02 , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : str=0.6 , SCREAMING_SNAKE_CASE : Optional[Any]=None , ):
        # Presumably: parent, batch_size, image_size, patch_size, num_channels,
        # is_training, use_labels, hidden_size, num_hidden_layers,
        # num_attention_heads, intermediate_size, hidden_act, dropout probs,
        # type_sequence_label_size, initializer_range, mask_ratio, scope —
        # TODO confirm against the unmangled upstream test file.
        lowercase__ : Union[str, Any] = parent
        lowercase__ : Optional[int] = batch_size
        lowercase__ : Union[str, Any] = image_size
        lowercase__ : List[Any] = patch_size
        lowercase__ : Any = num_channels
        lowercase__ : Optional[int] = is_training
        lowercase__ : Dict = use_labels
        lowercase__ : Any = hidden_size
        lowercase__ : List[Any] = num_hidden_layers
        lowercase__ : Union[str, Any] = num_attention_heads
        lowercase__ : Dict = intermediate_size
        lowercase__ : Optional[int] = hidden_act
        lowercase__ : Union[str, Any] = hidden_dropout_prob
        lowercase__ : Union[str, Any] = attention_probs_dropout_prob
        lowercase__ : List[Any] = type_sequence_label_size
        lowercase__ : Any = initializer_range
        lowercase__ : Optional[int] = mask_ratio
        lowercase__ : Union[str, Any] = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        lowercase__ : List[Any] = (image_size // patch_size) ** 2
        lowercase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )

    # Build (config, pixel_values, labels) for a single test invocation.
    def snake_case ( self : int ):
        lowercase__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowercase__ : str = None
        if self.use_labels:
            lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        lowercase__ : Optional[Any] = self.get_config()
        return config, pixel_values, labels

    # Construct the small ViTMAEConfig the tester parameters describe.
    def snake_case ( self : Tuple ):
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )

    # create_and_check_model: run TFViTMAEModel and check the hidden-state shape.
    def snake_case ( self : str , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ):
        lowercase__ : Tuple = TFViTMAEModel(config=SCREAMING_SNAKE_CASE )
        lowercase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    # create_and_check_for_pretraining: check logits shape, incl. greyscale input.
    def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] ):
        lowercase__ : Union[str, Any] = TFViTMAEForPreTraining(SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
        # expected sequence length = num_patches
        lowercase__ : List[str] = (self.image_size // self.patch_size) ** 2
        lowercase__ : List[Any] = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        lowercase__ : Dict = 1
        lowercase__ : List[Any] = TFViTMAEForPreTraining(SCREAMING_SNAKE_CASE )
        lowercase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[int] = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )

    # prepare_config_and_inputs_for_common: repackage inputs as a kwargs dict.
    def snake_case ( self : Optional[int] ):
        lowercase__ : int = self.prepare_config_and_inputs()
        ((lowercase__) , (lowercase__) , (lowercase__)) : Dict = config_and_inputs
        lowercase__ : str = {"pixel_values": pixel_values}
        return config, inputs_dict


# Common model-test harness for the TF ViTMAE models.
@require_tf
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
    """simple docstring"""

    # all_model_classes / pipeline mapping / common-test feature flags.
    lowercase_ = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    lowercase_ = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
    lowercase_ = False
    lowercase_ = False
    lowercase_ = False
    lowercase_ = False

    # setUp: create the model tester and the config tester.
    def snake_case ( self : List[str] ):
        lowercase__ : List[Any] = TFViTMAEModelTester(self )
        lowercase__ : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )

    # test_config: delegate to the common config tests.
    def snake_case ( self : Tuple ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds" )
    def snake_case ( self : Union[str, Any] ):
        pass

    # Check input/output embedding accessors on every model class.
    def snake_case ( self : Optional[int] ):
        lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase__ : List[Any] = model_class(SCREAMING_SNAKE_CASE )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            lowercase__ : List[Any] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , tf.keras.layers.Layer ) )

    # Check the call() signature starts with `pixel_values`.
    def snake_case ( self : Optional[Any] ):
        lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
            lowercase__ : Dict = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase__ : Union[str, Any] = [*signature.parameters.keys()]
            lowercase__ : List[str] = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )

    # test_model: forward the base model.
    def snake_case ( self : Optional[Any] ):
        lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )

    # test_for_pretraining: forward the pretraining head.
    def snake_case ( self : Optional[int] ):
        lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE )

    # Positional vs keyword calls must produce (nearly) identical outputs;
    # a fixed numpy seed keeps the random MAE mask reproducible.
    def snake_case ( self : Optional[Any] ):
        # make the mask reproducible
        np.random.seed(2 )
        lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase__ : List[Any] = int((config.image_size // config.patch_size) ** 2 )
        lowercase__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            lowercase__ : Optional[Any] = model_class(SCREAMING_SNAKE_CASE )
            lowercase__ : int = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
            lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
            lowercase__ : Any = copy.deepcopy(self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
            lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
            lowercase__ : Union[str, Any] = outputs_dict[0].numpy()
            lowercase__ : Optional[int] = outputs_keywords[0].numpy()
            self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )

    # Numpy-array inputs must behave the same as tf.Tensor inputs.
    def snake_case ( self : str ):
        # make the mask reproducible
        np.random.seed(2 )
        lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase__ : Optional[Any] = int((config.image_size // config.patch_size) ** 2 )
        lowercase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )

        # Convert every tensor in the inputs dict to a plain numpy array.
        def prepare_numpy_arrays(SCREAMING_SNAKE_CASE : Optional[int] ):
            lowercase__ : Tuple = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(SCREAMING_SNAKE_CASE ):
                    lowercase__ : Any = v.numpy()
                else:
                    lowercase__ : List[Any] = np.array(SCREAMING_SNAKE_CASE )
            return inputs_np_dict

        for model_class in self.all_model_classes:
            lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
            lowercase__ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
            lowercase__ : Any = prepare_numpy_arrays(SCREAMING_SNAKE_CASE )
            lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
            lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
            self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    # Override of check_pt_tf_models: inject the same fixed `noise` into both
    # the PT and the TF model so their random masks agree.
    def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] ):
        # make masks reproducible
        np.random.seed(2 )
        lowercase__ : Optional[int] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
        lowercase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        lowercase__ : Union[str, Any] = tf.constant(SCREAMING_SNAKE_CASE )
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        lowercase__ : Optional[int] = tf_noise
        super().check_pt_tf_models(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    # Keras round-trip: save each *MainLayer as an .h5 model and reload it.
    def snake_case ( self : str ):
        # make mask reproducible
        np.random.seed(2 )
        lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        # Collect every keras-serializable MainLayer matching a model class.
        lowercase__ : int = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__ ),)
            for module_member_name in dir(SCREAMING_SNAKE_CASE )
            if module_member_name.endswith("MainLayer" )
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer" )] == model_class.__name__[: -len("Model" )]
            for module_member in (getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ),)
            if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(SCREAMING_SNAKE_CASE , "_keras_serializable" , SCREAMING_SNAKE_CASE )
        }
        lowercase__ : List[str] = int((config.image_size // config.patch_size) ** 2 )
        lowercase__ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        lowercase__ : str = tf.convert_to_tensor(SCREAMING_SNAKE_CASE )
        inputs_dict.update({"noise": noise} )
        for main_layer_class in tf_main_layer_classes:
            lowercase__ : Tuple = main_layer_class(SCREAMING_SNAKE_CASE )
            lowercase__ : Optional[Any] = {
                name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
            }
            lowercase__ : Tuple = tf.keras.Model(SCREAMING_SNAKE_CASE , outputs=main_layer(SCREAMING_SNAKE_CASE ) )
            lowercase__ : str = model(SCREAMING_SNAKE_CASE )
            with tempfile.TemporaryDirectory() as tmpdirname:
                lowercase__ : str = os.path.join(SCREAMING_SNAKE_CASE , "keras_model.h5" )
                model.save(SCREAMING_SNAKE_CASE )
                lowercase__ : List[Any] = tf.keras.models.load_model(
                    SCREAMING_SNAKE_CASE , custom_objects={main_layer_class.__name__: main_layer_class} )
                assert isinstance(SCREAMING_SNAKE_CASE , tf.keras.Model )
                lowercase__ : Dict = model(SCREAMING_SNAKE_CASE )
                self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    # test_save_load: outputs must survive a save_pretrained/from_pretrained
    # round trip (seed fixed so the random mask matches).
    @slow
    def snake_case ( self : Optional[int] ):
        # make mask reproducible
        np.random.seed(2 )
        lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase__ : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
        lowercase__ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
            lowercase__ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
            lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
            if model_class.__name__ == "TFViTMAEModel":
                lowercase__ : str = outputs.last_hidden_state.numpy()
                lowercase__ : Optional[Any] = 0
            else:
                lowercase__ : Optional[Any] = outputs.logits.numpy()
                lowercase__ : Optional[int] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(SCREAMING_SNAKE_CASE , saved_model=SCREAMING_SNAKE_CASE )
                lowercase__ : List[str] = model_class.from_pretrained(SCREAMING_SNAKE_CASE )
                lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
                if model_class.__name__ == "TFViTMAEModel":
                    lowercase__ : Optional[int] = after_outputs["last_hidden_state"].numpy()
                    lowercase__ : Optional[int] = 0
                else:
                    lowercase__ : str = after_outputs["logits"].numpy()
                    lowercase__ : Tuple = 0
                lowercase__ : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
                self.assertLessEqual(SCREAMING_SNAKE_CASE , 1E-5 )

    # Config round trip: get_config must be jsonifiable and from_config must
    # rebuild an equivalent model.
    def snake_case ( self : List[Any] ):
        # make mask reproducible
        np.random.seed(2 )
        lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase__ : List[str] = int((config.image_size // config.patch_size) ** 2 )
        lowercase__ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            lowercase__ : Tuple = model_class(SCREAMING_SNAKE_CASE )
            lowercase__ : Dict = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
            lowercase__ : int = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
            lowercase__ : str = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(SCREAMING_SNAKE_CASE )
            lowercase__ : int = model_class.from_config(model.get_config() )
            # make sure it also accepts a normal config
            lowercase__ : Any = model_class.from_config(model.config )
            lowercase__ : Tuple = new_model(SCREAMING_SNAKE_CASE )  # Build model
            new_model.set_weights(model.get_weights() )
            lowercase__ : Union[str, Any] = new_model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
            self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
    def snake_case ( self : List[Any] ):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
    def snake_case ( self : str ):
        pass

    # Smoke-test from_pretrained on a hub checkpoint.
    @slow
    def snake_case ( self : List[Any] ):
        lowercase__ : List[Any] = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224" )
        self.assertIsNotNone(SCREAMING_SNAKE_CASE )


# Load the standard COCO test fixture image used by the integration test.
def __lowerCamelCase ( ):
    """simple docstring"""
    lowercase__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image


# Slow integration test: run the real facebook/vit-mae-base checkpoint and
# compare a 3x3 logits slice against reference values.
@require_tf
@require_vision
class snake_case__(unittest.TestCase ):
    """simple docstring"""

    @cached_property
    def snake_case ( self : Any ):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None

    @slow
    def snake_case ( self : Union[str, Any] ):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2 )
        lowercase__ : Optional[Any] = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" )
        lowercase__ : Optional[Any] = self.default_image_processor
        lowercase__ : Union[str, Any] = prepare_img()
        lowercase__ : Tuple = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="tf" )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        lowercase__ : Union[str, Any] = ViTMAEConfig()
        lowercase__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        lowercase__ : List[str] = np.random.uniform(size=(1, num_patches) )
        # forward pass
        lowercase__ : Optional[Any] = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
        # verify the logits
        lowercase__ : List[str] = tf.convert_to_tensor([1, 196, 768] )
        self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
        lowercase__ : List[str] = tf.convert_to_tensor(
            [[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
        tf.debugging.assert_near(outputs.logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
81
1
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class snake_case__(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """T5-style encoder over note tokens for spectrogram generation.

    The previous revision was unrunnable: the base classes were replaced by the
    undefined name ``_UpperCamelCase``, every ``__init__`` parameter was named
    ``SCREAMING_SNAKE_CASE`` (a SyntaxError), and the bodies read names
    (``vocab_size``, ``encoder_input_tokens``, ...) that were never bound.
    The names restored below are exactly the ones the surviving attribute
    reads demand (``self.token_embedder``, ``self.position_encoding``,
    ``self.dropout_pre``, ``self.encoders``, ``self.layer_norm``,
    ``self.dropout_post``).
    """

    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        # Learned-shape but frozen position table (the mangled original froze
        # it via a bare `= False` assignment that had lost its target).
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        # Encoder-only T5 configuration shared by every block.
        t5config = TaConfig(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = TaBlock(t5config)
            self.encoders.append(lyr)

        self.layer_norm = TaLayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens: torch.Tensor, encoder_inputs_mask: torch.Tensor):
        """Encode token ids.

        Args:
            encoder_input_tokens: (batch, seq) integer token ids.
            encoder_inputs_mask: (batch, seq) attention mask (1 = keep).

        Returns:
            Tuple of (encoded hidden states after dropout, the unchanged mask).
        """
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            # TaBlock returns a tuple; position 0 is the hidden states.
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask

    # Backward-compat alias for the previous (mangled) method name.
    snake_case = forward
81
"""ESM model configuration."""

from dataclasses import asdict, dataclass
from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class EsmConfig(PretrainedConfig):
    """Configuration for ESM protein language models, optionally carrying an
    ESMFold folding head (``is_folding_model=True``)."""

    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        """Serialize, expanding the nested EsmFoldConfig when present."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output


@dataclass
class EsmFoldConfig:
    """Settings for the ESMFold head; `trunk` nests a TrunkConfig."""

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        # Promote a plain dict (e.g. loaded from JSON) to a TrunkConfig.
        # NOTE(review): the mangled source gave this hook a throwaway name, so
        # it never ran; restored as the dataclass __post_init__ hook.
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output


@dataclass
class TrunkConfig:
    """Folding-trunk hyperparameters; validated in __post_init__."""

    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # BUG FIX: the original compared each dim to itself (x % x != 0 is
        # always false), so these checks never fired; the intended check is
        # divisibility by the corresponding head width.
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output


@dataclass
class StructureModuleConfig:
    """Invariant-point-attention structure module hyperparameters."""

    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
    """Return the standard ESM-2 token vocabulary."""
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )
81
1
"""ALIGN model configuration (text tower, vision tower, and combined)."""

import copy
import os
from typing import TYPE_CHECKING, List, Union

if TYPE_CHECKING:
    pass

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}


class AlignTextConfig(PretrainedConfig):
    """BERT-style text-tower configuration for ALIGN."""

    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignVisionConfig(PretrainedConfig):
    """EfficientNet-style vision-tower configuration for ALIGN."""

    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2_560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        # Each block expands to 4 layers in the EfficientNet encoder.
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignConfig(PretrainedConfig):
    """Combined ALIGN configuration nesting text and vision tower configs."""

    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: "AlignTextConfig", vision_config: "AlignVisionConfig", **kwargs):
        """Build an AlignConfig from the two tower configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
81
"""Deformable DETR model configuration."""

import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}


class DeformableDetrConfig(PretrainedConfig):
    """Configuration for Deformable DETR object detection models."""

    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1_024,
        encoder_layers=6,
        encoder_ffn_dim=1_024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1_024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # Rehydrate a serialized backbone config via its registered class.
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
81
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCAmelCase__ = { '''configuration_chinese_clip''': [ '''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ChineseCLIPConfig''', '''ChineseCLIPOnnxConfig''', '''ChineseCLIPTextConfig''', '''ChineseCLIPVisionConfig''', ], '''processing_chinese_clip''': ['''ChineseCLIPProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''ChineseCLIPFeatureExtractor'''] lowerCAmelCase__ = ['''ChineseCLIPImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ChineseCLIPModel''', '''ChineseCLIPPreTrainedModel''', '''ChineseCLIPTextModel''', '''ChineseCLIPVisionModel''', ] if TYPE_CHECKING: from .configuration_chinese_clip import ( CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, ChineseCLIPConfig, ChineseCLIPOnnxConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig, ) from .processing_chinese_clip import ChineseCLIPProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_chinese_clip import ( CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, ChineseCLIPModel, ChineseCLIPPreTrainedModel, ChineseCLIPTextModel, ChineseCLIPVisionModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
81
"""Image processor for Swin2SR: rescale then pad to a multiple of the window size."""

from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    """Rescales pixel values and symmetrically pads images so height/width are
    multiples of ``pad_size`` (the model's window size)."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale``."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(
        self,
        image: np.ndarray,
        size: int,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Symmetric-pad bottom/right so both dims become multiples of ``size``.

        Note: `(h // size + 1) * size - h` always adds at least one extra
        block, even when the dimension is already a multiple of ``size``
        (mirrors the original behavior).
        """
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Prepare one image or a batch for the model; per-call args override defaults."""
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
81
1
class RadixNode:
    """A node of a radix (compressed prefix) tree over strings."""

    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of each child's prefix to the child.
        self.nodes: dict[str, RadixNode] = {}
        # A node is a leaf iff the tree contains its accumulated word.
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Return (common prefix, remaining node prefix, remaining word)."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        """Insert every word in ``words``."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert ``word`` into the subtree rooted at this node."""
        # Case 1: the word equals this node's prefix -> mark node as a word.
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: no edge starts with the word's first character -> new child.
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: the child's prefix is fully matched -> recurse with the rest.
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: partial match -> split the child, insert an intermediate
            # node holding the shared prefix, then place the remainder.
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        """Return True iff ``word`` is stored in this subtree."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False

        matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
        # Leftover node prefix means the word diverges mid-edge.
        if remaining_prefix != "":
            return False
        # Word fully consumed: it exists iff the child is marked as a word.
        if remaining_word == "":
            return incoming_node.is_leaf
        return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        """Remove ``word`` from this subtree; return True if it was present."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False

        matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
        if remaining_prefix != "":
            return False
        if remaining_word != "":
            return incoming_node.delete(remaining_word)

        # The word ends exactly at the child node.
        if not incoming_node.is_leaf:
            return False

        if len(incoming_node.nodes) == 0:
            # Childless word node: remove it entirely.
            del self.nodes[word[0]]
            # Merge this node with its only remaining child when possible.
            if len(self.nodes) == 1 and not self.is_leaf:
                merging_node = list(self.nodes.values())[0]
                self.is_leaf = merging_node.is_leaf
                self.prefix += merging_node.prefix
                self.nodes = merging_node.nodes
        elif len(incoming_node.nodes) > 1:
            # Several children: just unmark the word.
            incoming_node.is_leaf = False
        else:
            # Exactly one child: merge it into the word node.
            merging_node = list(incoming_node.nodes.values())[0]
            incoming_node.is_leaf = merging_node.is_leaf
            incoming_node.prefix += merging_node.prefix
            incoming_node.nodes = merging_node.nodes

        return True

    def print_tree(self, height: int = 0) -> None:
        """Print the subtree, indenting by depth."""
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    """Self-check insert/find/delete on a small word set."""
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    """Demo: build a small trie and print it."""
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
81
import argparse import json from tqdm import tqdm def __lowerCamelCase ( ): """simple docstring""" lowercase__ : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--src_path" , type=lowerCamelCase__ , default="biencoder-nq-dev.json" , help="Path to raw DPR training data" , ) parser.add_argument( "--evaluation_set" , type=lowerCamelCase__ , help="where to store parsed evaluation_set file" , ) parser.add_argument( "--gold_data_path" , type=lowerCamelCase__ , help="where to store parsed gold_data_path file" , ) lowercase__ : Dict = parser.parse_args() with open(args.src_path , "r" ) as src_file, open(args.evaluation_set , "w" ) as eval_file, open( args.gold_data_path , "w" ) as gold_file: lowercase__ : List[str] = json.load(lowerCamelCase__ ) for dpr_record in tqdm(lowerCamelCase__ ): lowercase__ : Any = dpr_record["question"] lowercase__ : str = [context["title"] for context in dpr_record["positive_ctxs"]] eval_file.write(question + "\n" ) gold_file.write("\t".join(lowerCamelCase__ ) + "\n" ) if __name__ == "__main__": main()
81
1
import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json''', } class snake_case__(_UpperCamelCase ): """simple docstring""" lowercase_ = """mvp""" lowercase_ = ["""past_key_values"""] lowercase_ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self : Dict , SCREAMING_SNAKE_CASE : Tuple=50_267 , SCREAMING_SNAKE_CASE : Any=1_024 , SCREAMING_SNAKE_CASE : Tuple=12 , SCREAMING_SNAKE_CASE : List[str]=4_096 , SCREAMING_SNAKE_CASE : List[str]=16 , SCREAMING_SNAKE_CASE : str=12 , SCREAMING_SNAKE_CASE : List[Any]=4_096 , SCREAMING_SNAKE_CASE : Dict=16 , SCREAMING_SNAKE_CASE : Tuple=0.0 , SCREAMING_SNAKE_CASE : Dict=0.0 , SCREAMING_SNAKE_CASE : int="gelu" , SCREAMING_SNAKE_CASE : Optional[Any]=1_024 , SCREAMING_SNAKE_CASE : int=0.1 , SCREAMING_SNAKE_CASE : str=0.0 , SCREAMING_SNAKE_CASE : Dict=0.0 , SCREAMING_SNAKE_CASE : int=0.02 , SCREAMING_SNAKE_CASE : str=0.0 , SCREAMING_SNAKE_CASE : Optional[Any]=False , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : str=1 , SCREAMING_SNAKE_CASE : Any=0 , SCREAMING_SNAKE_CASE : Union[str, Any]=2 , SCREAMING_SNAKE_CASE : Any=True , SCREAMING_SNAKE_CASE : str=2 , SCREAMING_SNAKE_CASE : Union[str, Any]=2 , SCREAMING_SNAKE_CASE : str=False , SCREAMING_SNAKE_CASE : List[Any]=100 , SCREAMING_SNAKE_CASE : str=800 , **SCREAMING_SNAKE_CASE : Tuple , ): lowercase__ : Any = vocab_size lowercase__ : str = max_position_embeddings lowercase__ : Union[str, Any] = d_model lowercase__ : Optional[int] = encoder_ffn_dim lowercase__ : Optional[Any] = encoder_layers lowercase__ : Optional[Any] = encoder_attention_heads lowercase__ : Optional[int] = decoder_ffn_dim lowercase__ : Union[str, Any] = decoder_layers lowercase__ : Tuple = decoder_attention_heads lowercase__ : Dict = dropout 
lowercase__ : Union[str, Any] = attention_dropout lowercase__ : Optional[int] = activation_dropout lowercase__ : Dict = activation_function lowercase__ : Dict = init_std lowercase__ : str = encoder_layerdrop lowercase__ : int = decoder_layerdrop lowercase__ : List[str] = classifier_dropout lowercase__ : str = use_cache lowercase__ : List[str] = encoder_layers lowercase__ : Dict = scale_embedding # scale factor will be sqrt(d_model) if True lowercase__ : Any = use_prompt lowercase__ : int = prompt_length lowercase__ : Optional[Any] = prompt_mid_dim super().__init__( pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , is_encoder_decoder=SCREAMING_SNAKE_CASE , decoder_start_token_id=SCREAMING_SNAKE_CASE , forced_eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , SCREAMING_SNAKE_CASE ): lowercase__ : Optional[int] = self.bos_token_id warnings.warn( f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """ "The config can simply be saved and uploaded again to be fixed." )
81
import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer lowerCAmelCase__ = logging.getLogger(__name__) def __lowerCamelCase ( ): """simple docstring""" lowercase__ : str = argparse.ArgumentParser( description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." ) parser.add_argument( "--dataset_name" , type=lowerCamelCase__ , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , ) parser.add_argument( "--dataset_config" , type=lowerCamelCase__ , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." ) parser.add_argument( "--tokenizer_name_or_path" , type=lowerCamelCase__ , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , ) parser.add_argument( "--shard_size" , type=lowerCamelCase__ , default=1_000 , help="Number of entries to go in a single shard." , ) parser.add_argument("--split" , type=lowerCamelCase__ , default="train" , choices=["train", "test", "validation"] ) parser.add_argument( "--limit" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="Limit the number of shards (used for debugging)." , ) parser.add_argument( "--max_length" , type=lowerCamelCase__ , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum" " sequence length that is a multiple of 8." , ) parser.add_argument( "--output_dir" , default="tf-tpu" , type=lowerCamelCase__ , help="Output directory where the TFRecord shards will be saved. If the" " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord" " shards will be directly saved to a Google Cloud Storage bucket." 
, ) lowercase__ : Optional[int] = parser.parse_args() return args def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" def fn(lowerCamelCase__ ): return tokenizer(examples["text"] ) return fn def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" lowercase__ : str = [] for i in range(len(tokenized_data["input_ids"] ) ): lowercase__ : str = { "input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ), "attention_mask": tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ), } lowercase__ : Any = tf.train.Features(feature=lowerCamelCase__ ) lowercase__ : Any = tf.train.Example(features=lowerCamelCase__ ) lowercase__ : str = example.SerializeToString() records.append(lowerCamelCase__ ) return records def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" lowercase__ : Tuple = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: lowercase__ : List[str] = min(len(lowerCamelCase__ ) , args.limit ) lowercase__ : Union[str, Any] = dataset.select(range(lowerCamelCase__ ) ) print(F"""Limiting the dataset to {args.limit} entries.""" ) lowercase__ : Any = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) lowercase__ : Any = os.path.join(args.output_dir , args.split ) if not os.path.exists(lowerCamelCase__ ): os.makedirs(lowerCamelCase__ ) else: lowercase__ : str = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. 
lowercase__ : str = tokenize_function(lowerCamelCase__ ) lowercase__ : Optional[int] = dataset.map(lowerCamelCase__ , batched=lowerCamelCase__ , num_proc=4 , remove_columns=["text"] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(lowerCamelCase__ ): # Concatenate all texts. lowercase__ : Optional[Any] = {k: sum(examples[k] , [] ) for k in examples.keys()} lowercase__ : int = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 lowercase__ : List[str] = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. 
lowercase__ : Optional[int] = { k: [t[i : i + args.max_length] for i in range(0 , lowerCamelCase__ , args.max_length )] for k, t in concatenated_examples.items() } return result lowercase__ : Union[str, Any] = dataset_tokenized.map(lowerCamelCase__ , batched=lowerCamelCase__ , batch_size=1_000 , num_proc=4 ) lowercase__ : str = 0 lowercase__ : str = 0 for shard in range(0 , len(lowerCamelCase__ ) , args.shard_size ): lowercase__ : List[str] = grouped_dataset[shard : shard + args.shard_size] lowercase__ : str = len(dataset_snapshot["input_ids"] ) lowercase__ : int = os.path.join(lowerCamelCase__ , F"""dataset-{shard_count}-{records_containing}.tfrecord""" ) lowercase__ : Optional[int] = get_serialized_examples(lowerCamelCase__ ) with tf.io.TFRecordWriter(lowerCamelCase__ ) as out_file: for i in range(len(lowerCamelCase__ ) ): lowercase__ : Optional[int] = serialized_examples[i] out_file.write(lowerCamelCase__ ) print("Wrote file {} containing {} records".format(lowerCamelCase__ , lowerCamelCase__ ) ) shard_count += 1 total_records += records_containing with open(F"""split-{args.split}-records-count.txt""" , "w" ) as f: print(F"""Total {args.split} records: {total_records}""" , file=lowerCamelCase__ ) if __name__ == "__main__": lowerCAmelCase__ = parse_args() main(args)
81
1
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ = {'''configuration_timm_backbone''': ['''TimmBackboneConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''TimmBackbone'''] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
81
import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case__: """simple docstring""" def __init__( self : Any , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple=13 , SCREAMING_SNAKE_CASE : List[str]=32 , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : Any=4 , SCREAMING_SNAKE_CASE : Optional[Any]=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE : int=[2, 2, 3, 2] , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : str=37 , SCREAMING_SNAKE_CASE : Tuple="gelu" , SCREAMING_SNAKE_CASE : Optional[int]=10 , SCREAMING_SNAKE_CASE : Optional[int]=0.02 , SCREAMING_SNAKE_CASE : Union[str, Any]=["stage2", "stage3", "stage4"] , SCREAMING_SNAKE_CASE : Optional[int]=[2, 3, 4] , SCREAMING_SNAKE_CASE : str=None , ): lowercase__ : Union[str, Any] = parent lowercase__ : Optional[int] = batch_size lowercase__ : Optional[Any] = image_size lowercase__ : Tuple = num_channels lowercase__ : Tuple = num_stages lowercase__ : List[Any] = hidden_sizes lowercase__ : Any = depths lowercase__ : List[str] = is_training lowercase__ : int = use_labels lowercase__ : Union[str, Any] = 
intermediate_size lowercase__ : List[Any] = hidden_act lowercase__ : Tuple = num_labels lowercase__ : Optional[Any] = initializer_range lowercase__ : Optional[Any] = out_features lowercase__ : Union[str, Any] = out_indices lowercase__ : Tuple = scope def snake_case ( self : Dict ): lowercase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ : Dict = None if self.use_labels: lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_labels ) lowercase__ : Tuple = self.get_config() return config, pixel_values, labels def snake_case ( self : Tuple ): return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] ): lowercase__ : Dict = ConvNextVaModel(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ): lowercase__ : Any = ConvNextVaForImageClassification(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() lowercase__ : str = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case ( self : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : 
Dict ): lowercase__ : Any = ConvNextVaBackbone(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None lowercase__ : str = None lowercase__ : List[Any] = ConvNextVaBackbone(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def snake_case ( self : Dict ): lowercase__ : str = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Optional[int] = config_and_inputs lowercase__ : List[Any] = {"pixel_values": pixel_values} return config, inputs_dict def snake_case ( self : Optional[Any] ): lowercase__ : Optional[Any] = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Dict = config_and_inputs lowercase__ : Optional[Any] = {"pixel_values": pixel_values, "labels": labels} return config, inputs_dict @require_torch class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ): """simple docstring""" lowercase_ = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) lowercase_ = ( {"""feature-extraction""": ConvNextVaModel, """image-classification""": 
ConvNextVaForImageClassification} if is_torch_available() else {} ) lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False def snake_case ( self : List[Any] ): lowercase__ : List[str] = ConvNextVaModelTester(self ) lowercase__ : Optional[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 ) def snake_case ( self : Optional[int] ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def snake_case ( self : List[str] ): return @unittest.skip(reason="ConvNextV2 does not use inputs_embeds" ) def snake_case ( self : Dict ): pass @unittest.skip(reason="ConvNextV2 does not support input and output embeddings" ) def snake_case ( self : Union[str, Any] ): pass @unittest.skip(reason="ConvNextV2 does not use feedforward chunking" ) def snake_case ( self : Union[str, Any] ): pass def snake_case ( self : Optional[int] ): if not self.model_tester.is_training: return for model_class in self.all_model_classes: lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels() lowercase__ : List[str] = True if model_class.__name__ in [ *get_values(SCREAMING_SNAKE_CASE ), *get_values(SCREAMING_SNAKE_CASE ), ]: continue lowercase__ : List[str] = model_class(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.train() lowercase__ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE ) lowercase__ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE ).loss loss.backward() def snake_case ( self : Optional[Any] ): if not 
self.model_tester.is_training: return for model_class in self.all_model_classes: lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_with_labels() lowercase__ : Optional[Any] = False lowercase__ : Dict = True if ( model_class.__name__ in [*get_values(SCREAMING_SNAKE_CASE ), *get_values(SCREAMING_SNAKE_CASE )] or not model_class.supports_gradient_checkpointing ): continue lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.gradient_checkpointing_enable() model.train() lowercase__ : str = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE ) lowercase__ : str = model(**SCREAMING_SNAKE_CASE ).loss loss.backward() def snake_case ( self : int ): lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE ) lowercase__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : str = [*signature.parameters.keys()] lowercase__ : Optional[int] = ["pixel_values"] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE ) def snake_case ( self : Dict ): lowercase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) def snake_case ( self : str ): def check_hidden_states_output(SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : str ): lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): lowercase__ : Tuple = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) lowercase__ : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase__ : Dict = self.model_tester.num_stages 
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Union[str, Any] = True check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : Optional[Any] = True check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def snake_case ( self : Any ): lowercase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE ) @slow def snake_case ( self : List[str] ): for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : List[str] = ConvNextVaModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( ): """simple docstring""" lowercase__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class snake_case__(unittest.TestCase ): """simple docstring""" @cached_property def snake_case ( self : List[Any] ): return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None @slow def snake_case ( self : Optional[int] ): lowercase__ : Union[str, Any] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(SCREAMING_SNAKE_CASE ) lowercase__ : Dict = self.default_image_processor lowercase__ : int = prepare_img() lowercase__ : Optional[Any] = preprocessor(images=SCREAMING_SNAKE_CASE , return_tensors="pt" 
).to(SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE ) # verify the logits lowercase__ : Optional[int] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE ) lowercase__ : Optional[Any] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
81
1
from collections.abc import Iterable from typing import Generic, TypeVar lowerCAmelCase__ = TypeVar('''_T''') class snake_case__(Generic[_T] ): """simple docstring""" def __init__( self : Dict , SCREAMING_SNAKE_CASE : Iterable[_T] | None = None ): lowercase__ : list[_T] = list(iterable or [] ) lowercase__ : list[_T] = [] def __len__( self : Optional[int] ): return len(self._stacka ) + len(self._stacka ) def __repr__( self : Tuple ): return f"""Queue({tuple(self._stacka[::-1] + self._stacka )})""" def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : _T ): self._stacka.append(SCREAMING_SNAKE_CASE ) def snake_case ( self : List[str] ): lowercase__ : str = self._stacka.pop lowercase__ : Dict = self._stacka.append if not self._stacka: while self._stacka: stacka_append(stacka_pop() ) if not self._stacka: raise IndexError("Queue is empty" ) return self._stacka.pop() if __name__ == "__main__": from doctest import testmod testmod()
81
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class snake_case__(_UpperCamelCase ): """simple docstring""" @slow @require_torch def snake_case ( self : Any ): lowercase__ : List[str] = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" ) lowercase__ : int = BertTokenizer.from_pretrained("bert-base-uncased" ) lowercase__ : str = bertabert.config.encoder.vocab_size lowercase__ : List[str] = tokenizer.sep_token_id lowercase__ : Optional[Any] = tokenizer.cls_token_id lowercase__ : int = 128 lowercase__ : str = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" ) lowercase__ : Tuple = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" ) lowercase__ : Tuple = train_dataset.select(range(32 ) ) lowercase__ : Optional[int] = val_dataset.select(range(16 ) ) lowercase__ : int = 4 def _map_to_encoder_decoder_inputs(SCREAMING_SNAKE_CASE : Optional[Any] ): # Tokenizer will automatically set [BOS] <text> [EOS] lowercase__ : List[Any] = tokenizer(batch["article"] , padding="max_length" , truncation=SCREAMING_SNAKE_CASE , max_length=512 ) lowercase__ : Dict = tokenizer(batch["highlights"] , padding="max_length" , truncation=SCREAMING_SNAKE_CASE , max_length=128 ) lowercase__ : Tuple = inputs.input_ids lowercase__ : Optional[int] = inputs.attention_mask lowercase__ : int = outputs.input_ids lowercase__ : Dict = outputs.input_ids.copy() lowercase__ : int = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"] ] lowercase__ : List[Any] = outputs.attention_mask assert all(len(SCREAMING_SNAKE_CASE ) == 512 for x in inputs.input_ids ) assert all(len(SCREAMING_SNAKE_CASE ) == 128 for x in outputs.input_ids ) return batch def 
_compute_metrics(SCREAMING_SNAKE_CASE : List[str] ): lowercase__ : Union[str, Any] = pred.label_ids lowercase__ : Dict = pred.predictions # all unnecessary tokens are removed lowercase__ : List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE ) lowercase__ : str = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE ) lowercase__ : Tuple = sum([int(pred_str[i] == label_str[i] ) for i in range(len(SCREAMING_SNAKE_CASE ) )] ) / len(SCREAMING_SNAKE_CASE ) return {"accuracy": accuracy} # map train dataset lowercase__ : List[str] = train_dataset.map( _map_to_encoder_decoder_inputs , batched=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , remove_columns=["article", "highlights"] , ) train_dataset.set_format( type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , ) # same for validation dataset lowercase__ : Any = val_dataset.map( _map_to_encoder_decoder_inputs , batched=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , remove_columns=["article", "highlights"] , ) val_dataset.set_format( type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , ) lowercase__ : List[str] = self.get_auto_remove_tmp_dir() lowercase__ : int = SeqaSeqTrainingArguments( output_dir=SCREAMING_SNAKE_CASE , per_device_train_batch_size=SCREAMING_SNAKE_CASE , per_device_eval_batch_size=SCREAMING_SNAKE_CASE , predict_with_generate=SCREAMING_SNAKE_CASE , evaluation_strategy="steps" , do_train=SCREAMING_SNAKE_CASE , do_eval=SCREAMING_SNAKE_CASE , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer lowercase__ : str = SeqaSeqTrainer( model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , compute_metrics=_compute_metrics , train_dataset=SCREAMING_SNAKE_CASE , eval_dataset=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , ) # start training trainer.train()
81
1
import warnings from ...utils import logging from .image_processing_poolformer import PoolFormerImageProcessor lowerCAmelCase__ = logging.get_logger(__name__) class snake_case__(_UpperCamelCase ): """simple docstring""" def __init__( self : str , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : int ): warnings.warn( "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use PoolFormerImageProcessor instead." , SCREAMING_SNAKE_CASE , ) super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
81
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger(__name__) def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" lowercase__ : List[str] = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: lowercase__ : Tuple = 192 lowercase__ : List[Any] = 768 lowercase__ : Tuple = 12 lowercase__ : List[str] = 3 lowercase__ : List[Any] = [800, 1_333] lowercase__ : Union[str, Any] = False elif yolos_name == "yolos_s_dWr": lowercase__ : str = 330 lowercase__ : List[Any] = 14 lowercase__ : Tuple = 6 lowercase__ : Optional[int] = 1_320 elif "yolos_s" in yolos_name: lowercase__ : Dict = 384 lowercase__ : str = 1_536 lowercase__ : List[Any] = 12 lowercase__ : List[Any] = 6 elif "yolos_b" in yolos_name: lowercase__ : int = [800, 1_344] lowercase__ : Tuple = 91 lowercase__ : Optional[int] = "huggingface/label-files" lowercase__ : Optional[int] = "coco-detection-id2label.json" lowercase__ : Any = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) ) lowercase__ : Optional[int] = {int(lowerCamelCase__ ): v for k, v in idalabel.items()} lowercase__ : List[Any] = idalabel lowercase__ : Optional[Any] = {v: k for k, v in idalabel.items()} return config def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ): """simple docstring""" for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowercase__ : Any = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" ) lowercase__ : Any = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict lowercase__ : Union[str, Any] = 
in_proj_weight[: config.hidden_size, :] lowercase__ : Union[str, Any] = in_proj_bias[: config.hidden_size] lowercase__ : Dict = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase__ : Any = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowercase__ : str = in_proj_weight[-config.hidden_size :, :] lowercase__ : Tuple = in_proj_bias[-config.hidden_size :] def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" if "backbone" in name: lowercase__ : Union[str, Any] = name.replace("backbone" , "vit" ) if "cls_token" in name: lowercase__ : List[str] = name.replace("cls_token" , "embeddings.cls_token" ) if "det_token" in name: lowercase__ : List[str] = name.replace("det_token" , "embeddings.detection_tokens" ) if "mid_pos_embed" in name: lowercase__ : List[Any] = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" ) if "pos_embed" in name: lowercase__ : Dict = name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: lowercase__ : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "blocks" in name: lowercase__ : int = name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: lowercase__ : Optional[Any] = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: lowercase__ : Optional[int] = name.replace("attn" , "attention.self" ) if "norm1" in name: lowercase__ : int = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: lowercase__ : int = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: lowercase__ : List[str] = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: lowercase__ : Union[str, Any] = name.replace("mlp.fc2" , "output.dense" ) if "class_embed" in name: lowercase__ : int = name.replace("class_embed" , "class_labels_classifier" ) if "bbox_embed" in name: lowercase__ : Optional[int] = name.replace("bbox_embed" , "bbox_predictor" ) if "vit.norm" in name: lowercase__ : 
Optional[Any] = name.replace("vit.norm" , "vit.layernorm" ) return name def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" for key in orig_state_dict.copy().keys(): lowercase__ : List[Any] = orig_state_dict.pop(lowerCamelCase__ ) if "qkv" in key: lowercase__ : Dict = key.split("." ) lowercase__ : List[Any] = int(key_split[2] ) lowercase__ : Optional[int] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: lowercase__ : str = val[:dim, :] lowercase__ : int = val[ dim : dim * 2, : ] lowercase__ : str = val[-dim:, :] else: lowercase__ : Tuple = val[:dim] lowercase__ : Any = val[dim : dim * 2] lowercase__ : Optional[Any] = val[-dim:] else: lowercase__ : Optional[Any] = val return orig_state_dict def __lowerCamelCase ( ): """simple docstring""" lowercase__ : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg" lowercase__ : List[str] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ) return im @torch.no_grad() def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ): """simple docstring""" lowercase__ : List[Any] = get_yolos_config(lowerCamelCase__ ) # load original state_dict lowercase__ : Dict = torch.load(lowerCamelCase__ , map_location="cpu" )["model"] # load 🤗 model lowercase__ : Dict = YolosForObjectDetection(lowerCamelCase__ ) model.eval() lowercase__ : int = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ ) model.load_state_dict(lowerCamelCase__ ) # Check outputs on an image, prepared by YolosImageProcessor lowercase__ : Dict = 800 if yolos_name != "yolos_ti" else 512 lowercase__ : Optional[Any] = YolosImageProcessor(format="coco_detection" , size=lowerCamelCase__ ) lowercase__ : int = image_processor(images=prepare_img() , return_tensors="pt" ) lowercase__ : int = model(**lowerCamelCase__ ) lowercase__ , lowercase__ : int = outputs.logits, outputs.pred_boxes lowercase__ , lowercase__ : int = None, 
None if yolos_name == "yolos_ti": lowercase__ : Optional[int] = torch.tensor( [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] ) lowercase__ : Dict = torch.tensor( [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] ) elif yolos_name == "yolos_s_200_pre": lowercase__ : Any = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] ) lowercase__ : List[str] = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] ) elif yolos_name == "yolos_s_300_pre": lowercase__ : Dict = torch.tensor( [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] ) lowercase__ : Tuple = torch.tensor( [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] ) elif yolos_name == "yolos_s_dWr": lowercase__ : Optional[Any] = torch.tensor( [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] ) lowercase__ : int = torch.tensor( [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] ) elif yolos_name == "yolos_base": lowercase__ : List[str] = torch.tensor( [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] ) lowercase__ : List[str] = torch.tensor( [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] ) else: raise ValueError(F"""Unknown yolos_name: {yolos_name}""" ) assert torch.allclose(logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(lowerCamelCase__ ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(lowerCamelCase__ ) if push_to_hub: lowercase__ : Tuple = 
{ "yolos_ti": "yolos-tiny", "yolos_s_200_pre": "yolos-small", "yolos_s_300_pre": "yolos-small-300", "yolos_s_dWr": "yolos-small-dwr", "yolos_base": "yolos-base", } print("Pushing to the hub..." ) lowercase__ : Optional[int] = model_mapping[yolos_name] image_processor.push_to_hub(lowerCamelCase__ , organization="hustvl" ) model.push_to_hub(lowerCamelCase__ , organization="hustvl" ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--yolos_name''', default='''yolos_s_200_pre''', type=str, help=( '''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',''' ''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.''' ), ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) lowerCAmelCase__ = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
81
1
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class snake_case__(_UpperCamelCase ): """simple docstring""" @slow @require_torch def snake_case ( self : Any ): lowercase__ : List[str] = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" ) lowercase__ : int = BertTokenizer.from_pretrained("bert-base-uncased" ) lowercase__ : str = bertabert.config.encoder.vocab_size lowercase__ : List[str] = tokenizer.sep_token_id lowercase__ : Optional[Any] = tokenizer.cls_token_id lowercase__ : int = 128 lowercase__ : str = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" ) lowercase__ : Tuple = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" ) lowercase__ : Tuple = train_dataset.select(range(32 ) ) lowercase__ : Optional[int] = val_dataset.select(range(16 ) ) lowercase__ : int = 4 def _map_to_encoder_decoder_inputs(SCREAMING_SNAKE_CASE : Optional[Any] ): # Tokenizer will automatically set [BOS] <text> [EOS] lowercase__ : List[Any] = tokenizer(batch["article"] , padding="max_length" , truncation=SCREAMING_SNAKE_CASE , max_length=512 ) lowercase__ : Dict = tokenizer(batch["highlights"] , padding="max_length" , truncation=SCREAMING_SNAKE_CASE , max_length=128 ) lowercase__ : Tuple = inputs.input_ids lowercase__ : Optional[int] = inputs.attention_mask lowercase__ : int = outputs.input_ids lowercase__ : Dict = outputs.input_ids.copy() lowercase__ : int = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"] ] lowercase__ : List[Any] = outputs.attention_mask assert all(len(SCREAMING_SNAKE_CASE ) == 512 for x in inputs.input_ids ) assert all(len(SCREAMING_SNAKE_CASE ) == 128 for x in outputs.input_ids ) return batch def 
_compute_metrics(SCREAMING_SNAKE_CASE : List[str] ): lowercase__ : Union[str, Any] = pred.label_ids lowercase__ : Dict = pred.predictions # all unnecessary tokens are removed lowercase__ : List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE ) lowercase__ : str = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE ) lowercase__ : Tuple = sum([int(pred_str[i] == label_str[i] ) for i in range(len(SCREAMING_SNAKE_CASE ) )] ) / len(SCREAMING_SNAKE_CASE ) return {"accuracy": accuracy} # map train dataset lowercase__ : List[str] = train_dataset.map( _map_to_encoder_decoder_inputs , batched=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , remove_columns=["article", "highlights"] , ) train_dataset.set_format( type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , ) # same for validation dataset lowercase__ : Any = val_dataset.map( _map_to_encoder_decoder_inputs , batched=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , remove_columns=["article", "highlights"] , ) val_dataset.set_format( type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , ) lowercase__ : List[str] = self.get_auto_remove_tmp_dir() lowercase__ : int = SeqaSeqTrainingArguments( output_dir=SCREAMING_SNAKE_CASE , per_device_train_batch_size=SCREAMING_SNAKE_CASE , per_device_eval_batch_size=SCREAMING_SNAKE_CASE , predict_with_generate=SCREAMING_SNAKE_CASE , evaluation_strategy="steps" , do_train=SCREAMING_SNAKE_CASE , do_eval=SCREAMING_SNAKE_CASE , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer lowercase__ : str = SeqaSeqTrainer( model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , compute_metrics=_compute_metrics , train_dataset=SCREAMING_SNAKE_CASE , eval_dataset=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , ) # start training trainer.train()
81
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ = { '''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''], '''processing_mgp_str''': ['''MgpstrProcessor'''], '''tokenization_mgp_str''': ['''MgpstrTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MgpstrModel''', '''MgpstrPreTrainedModel''', '''MgpstrForSceneTextRecognition''', ] if TYPE_CHECKING: from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mgp_str import ( MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
81
1
import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class snake_case__: """simple docstring""" @staticmethod def snake_case ( *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Union[str, Any] ): pass @is_pipeline_test @require_vision @require_torch class snake_case__(unittest.TestCase ): """simple docstring""" lowercase_ = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Any ): lowercase__ : List[Any] = pipeline( "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" ) lowercase__ : Union[str, Any] = [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] return object_detector, examples def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] ): lowercase__ : Optional[Any] = object_detector(examples[0] , threshold=0.0 ) lowercase__ : int = len(SCREAMING_SNAKE_CASE ) self.assertGreater(SCREAMING_SNAKE_CASE , 0 ) self.assertEqual( SCREAMING_SNAKE_CASE , [ { "score": ANY(SCREAMING_SNAKE_CASE ), "label": ANY(SCREAMING_SNAKE_CASE ), "box": {"xmin": ANY(SCREAMING_SNAKE_CASE ), "ymin": ANY(SCREAMING_SNAKE_CASE ), "xmax": ANY(SCREAMING_SNAKE_CASE ), "ymax": ANY(SCREAMING_SNAKE_CASE )}, } for i in range(SCREAMING_SNAKE_CASE ) ] , ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF" ) def snake_case ( self : List[str] ): pass @require_torch def snake_case ( self : int ): lowercase__ : List[str] = pipeline( "zero-shot-object-detection" , 
model="hf-internal-testing/tiny-random-owlvit-object-detection" ) lowercase__ : Union[str, Any] = object_detector( "./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [ {"score": 0.7_235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7_218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7_184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.6_748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6_656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6_614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6_456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}}, {"score": 0.6_419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, ] , ) lowercase__ : List[Any] = object_detector( [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] , threshold=0.64 , ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [ [ {"score": 0.7_235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7_218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7_184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.6_748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6_656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6_614, "label": "couch", "box": 
{"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6_456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}}, {"score": 0.6_419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, ] ] , ) @require_torch @slow def snake_case ( self : int ): lowercase__ : List[str] = pipeline("zero-shot-object-detection" ) lowercase__ : Dict = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [ {"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, {"score": 0.1_474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, {"score": 0.1_208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, ] , ) lowercase__ : Union[str, Any] = object_detector( [ { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, ] , ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [ [ {"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, {"score": 0.1_474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, {"score": 0.1_208, "label": "couch", "box": {"xmin": 4, "ymin": 
0, "xmax": 642, "ymax": 476}}, ], [ {"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, {"score": 0.1_474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, {"score": 0.1_208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, ], ] , ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF" ) def snake_case ( self : Any ): pass @require_torch @slow def snake_case ( self : Union[str, Any] ): lowercase__ : Tuple = 0.2 lowercase__ : Tuple = pipeline("zero-shot-object-detection" ) lowercase__ : Dict = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=SCREAMING_SNAKE_CASE , ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [ {"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, ] , ) @require_torch @slow def snake_case ( self : Any ): lowercase__ : Dict = 2 lowercase__ : List[Any] = pipeline("zero-shot-object-detection" ) lowercase__ : Tuple = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=SCREAMING_SNAKE_CASE , ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [ {"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, ] , )
81
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class snake_case__(unittest.TestCase ): """simple docstring""" def snake_case ( self : Optional[Any] ): lowercase__ : Dict = tempfile.mkdtemp() # fmt: off lowercase__ : Any = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on lowercase__ : Dict = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) ) lowercase__ : Tuple = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] lowercase__ : Tuple = {"unk_token": "<unk>"} lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(SCREAMING_SNAKE_CASE ) ) lowercase__ : Tuple = { "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073], "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711], } lowercase__ : Optional[Any] = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Union[str, Any] ): return 
CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ) def snake_case ( self : Optional[int] , **SCREAMING_SNAKE_CASE : Union[str, Any] ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ) def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Dict ): return CLIPImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ) def snake_case ( self : Union[str, Any] ): shutil.rmtree(self.tmpdirname ) def snake_case ( self : Any ): lowercase__ : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowercase__ : str = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs] return image_inputs def snake_case ( self : int ): lowercase__ : Optional[int] = self.get_tokenizer() lowercase__ : List[Any] = self.get_rust_tokenizer() lowercase__ : List[str] = self.get_image_processor() lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) processor_slow.save_pretrained(self.tmpdirname ) lowercase__ : Dict = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE ) lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) processor_fast.save_pretrained(self.tmpdirname ) lowercase__ : Tuple = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE ) self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) 
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE ) self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE ) def snake_case ( self : List[str] ): lowercase__ : Any = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowercase__ : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) lowercase__ : int = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 ) lowercase__ : Union[str, Any] = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE ) def snake_case ( self : str ): lowercase__ : int = self.get_image_processor() lowercase__ : Optional[Any] = self.get_tokenizer() lowercase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) lowercase__ : Any = self.prepare_image_inputs() lowercase__ : List[Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="np" ) lowercase__ : Optional[int] = processor(images=SCREAMING_SNAKE_CASE , return_tensors="np" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def snake_case ( self : str ): lowercase__ : Tuple = self.get_image_processor() lowercase__ : Any = self.get_tokenizer() lowercase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) lowercase__ : int = "lower newer" lowercase__ : Dict = processor(text=SCREAMING_SNAKE_CASE ) lowercase__ : int = 
tokenizer(SCREAMING_SNAKE_CASE ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def snake_case ( self : Union[str, Any] ): lowercase__ : Optional[int] = self.get_image_processor() lowercase__ : Tuple = self.get_tokenizer() lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) lowercase__ : List[Any] = "lower newer" lowercase__ : str = self.prepare_image_inputs() lowercase__ : int = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE ): processor() def snake_case ( self : Optional[Any] ): lowercase__ : Dict = self.get_image_processor() lowercase__ : Optional[Any] = self.get_tokenizer() lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) lowercase__ : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase__ : Any = processor.batch_decode(SCREAMING_SNAKE_CASE ) lowercase__ : Any = tokenizer.batch_decode(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def snake_case ( self : str ): lowercase__ : List[str] = self.get_image_processor() lowercase__ : List[str] = self.get_tokenizer() lowercase__ : Union[str, Any] = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE ) lowercase__ : Any = "lower newer" lowercase__ : Union[str, Any] = self.prepare_image_inputs() lowercase__ : int = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
81
1
import os
import tempfile
import unittest

import numpy as np

from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow

if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard

    from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline


@require_flax
class snake_case__(unittest.TestCase ):
    """Fast check for the Flax pipeline downloader (tiny test repo only).

    NOTE(review): this chunk is machine-obfuscated — assignment targets were
    rewritten to `lowercase__` while later reads keep the original names
    (`pipeline`, `all_root_files`, `files`, ...), so it will not run as-is.
    """

    def snake_case ( self : Dict ):
        # Downloading a repo that ships Flax weights must not pull any
        # PyTorch `.bin` files into the local snapshot cache.
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            lowercase__ : Optional[Any] = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE )
            lowercase__ : str = [t[-1] for t in os.walk(os.path.join(SCREAMING_SNAKE_CASE , os.listdir(SCREAMING_SNAKE_CASE )[0] , "snapshots" ) )]
            lowercase__ : Union[str, Any] = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin" ) for f in files )


@slow
@require_flax
class snake_case__(unittest.TestCase ):
    """Slow end-to-end sampling tests for FlaxStableDiffusionPipeline.

    Each test pmap-shards the prompt and RNG across devices, samples, checks
    the output image shape, and — when 8 devices are present (TPU v3-8) —
    compares fixed float checksums of a slice and of the whole batch.
    """

    def snake_case ( self : Dict ):
        # Tiny test pipeline, 4 inference steps.
        lowercase__ , lowercase__ : Any = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[int] = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        lowercase__ : Dict = jax.random.PRNGKey(0 )
        lowercase__ : str = 4
        lowercase__ : Tuple = jax.device_count()
        lowercase__ : Tuple = num_samples * [prompt]
        lowercase__ : Union[str, Any] = pipeline.prepare_inputs(SCREAMING_SNAKE_CASE )
        # shard inputs and rng
        lowercase__ : Union[str, Any] = replicate(SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[int] = jax.random.split(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        lowercase__ : Dict = shard(SCREAMING_SNAKE_CASE )
        lowercase__ : Any = pipeline(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , jit=SCREAMING_SNAKE_CASE ).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            # checksums are specific to an 8-device (TPU v3-8) run
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_514_745 ) < 1E-3
            assert np.abs(np.abs(SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 49_947.875 ) < 5E-1
        lowercase__ : List[Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
        assert len(SCREAMING_SNAKE_CASE ) == num_samples

    def snake_case ( self : Dict ):
        # Full model, fp32 "flax" revision, 50 steps, 512x512 output.
        lowercase__ , lowercase__ : str = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[Any] = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        lowercase__ : Optional[Any] = jax.random.PRNGKey(0 )
        lowercase__ : str = 50
        lowercase__ : List[str] = jax.device_count()
        lowercase__ : Any = num_samples * [prompt]
        lowercase__ : List[Any] = pipeline.prepare_inputs(SCREAMING_SNAKE_CASE )
        # shard inputs and rng
        lowercase__ : List[Any] = replicate(SCREAMING_SNAKE_CASE )
        lowercase__ : str = jax.random.split(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[Any] = shard(SCREAMING_SNAKE_CASE )
        lowercase__ : List[Any] = pipeline(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , jit=SCREAMING_SNAKE_CASE ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_652_401) ) < 1E-3
            assert np.abs((np.abs(SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2_383_808.2) ) < 5E-1

    def snake_case ( self : int ):
        # Same prompt on the bfloat16 revision.
        lowercase__ , lowercase__ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[Any] = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        lowercase__ : Any = jax.random.PRNGKey(0 )
        lowercase__ : List[Any] = 50
        lowercase__ : List[Any] = jax.device_count()
        lowercase__ : Union[str, Any] = num_samples * [prompt]
        lowercase__ : Optional[int] = pipeline.prepare_inputs(SCREAMING_SNAKE_CASE )
        # shard inputs and rng
        lowercase__ : Dict = replicate(SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[Any] = jax.random.split(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        lowercase__ : str = shard(SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[int] = pipeline(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , jit=SCREAMING_SNAKE_CASE ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_003_906) ) < 1E-3
            assert np.abs((np.abs(SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2_373_516.75) ) < 5E-1

    def snake_case ( self : Optional[int] ):
        # bfloat16 revision with the default safety checker left in place;
        # expected checksums match the previous test.
        lowercase__ , lowercase__ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa )
        lowercase__ : int = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        lowercase__ : int = jax.random.PRNGKey(0 )
        lowercase__ : Union[str, Any] = 50
        lowercase__ : Optional[Any] = jax.device_count()
        lowercase__ : Optional[int] = num_samples * [prompt]
        lowercase__ : int = pipeline.prepare_inputs(SCREAMING_SNAKE_CASE )
        # shard inputs and rng
        lowercase__ : int = replicate(SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[Any] = jax.random.split(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        lowercase__ : Tuple = shard(SCREAMING_SNAKE_CASE )
        lowercase__ : Union[str, Any] = pipeline(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , jit=SCREAMING_SNAKE_CASE ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_003_906) ) < 1E-3
            assert np.abs((np.abs(SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2_373_516.75) ) < 5E-1

    def snake_case ( self : Optional[int] ):
        # Swap in a DDIM scheduler (scaled-linear betas, steps_offset=1)
        # and verify the sampling checksums for that scheduler.
        lowercase__ : int = FlaxDDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , set_alpha_to_one=SCREAMING_SNAKE_CASE , steps_offset=1 , )
        lowercase__ , lowercase__ : str = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , scheduler=SCREAMING_SNAKE_CASE , safety_checker=SCREAMING_SNAKE_CASE , )
        lowercase__ : List[Any] = scheduler.create_state()
        lowercase__ : Optional[Any] = scheduler_state
        lowercase__ : int = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        lowercase__ : int = jax.random.PRNGKey(0 )
        lowercase__ : Union[str, Any] = 50
        lowercase__ : Optional[int] = jax.device_count()
        lowercase__ : Tuple = num_samples * [prompt]
        lowercase__ : Dict = pipeline.prepare_inputs(SCREAMING_SNAKE_CASE )
        # shard inputs and rng
        lowercase__ : Optional[Any] = replicate(SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[Any] = jax.random.split(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        lowercase__ : Dict = shard(SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[int] = pipeline(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , jit=SCREAMING_SNAKE_CASE ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045_043_945) ) < 1E-3
            assert np.abs((np.abs(SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2_347_693.5) ) < 5E-1

    def snake_case ( self : List[Any] ):
        # Memory-efficient attention must produce (nearly) the same images
        # as the default attention implementation: compare a fixed slice.
        lowercase__ : Optional[int] = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        lowercase__ : List[str] = jax.device_count()
        lowercase__ : Any = num_samples * [prompt]
        lowercase__ : int = jax.random.split(jax.random.PRNGKey(0 ) , SCREAMING_SNAKE_CASE )
        lowercase__ , lowercase__ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=SCREAMING_SNAKE_CASE , )
        lowercase__ : Union[str, Any] = replicate(SCREAMING_SNAKE_CASE )
        lowercase__ : List[str] = pipeline.prepare_inputs(SCREAMING_SNAKE_CASE )
        lowercase__ : List[str] = shard(SCREAMING_SNAKE_CASE )
        lowercase__ : Union[str, Any] = pipeline(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , jit=SCREAMING_SNAKE_CASE ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        lowercase__ : Union[str, Any] = images[2, 0, 256, 10:17, 1]
        # With memory efficient attention
        lowercase__ , lowercase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=SCREAMING_SNAKE_CASE , use_memory_efficient_attention=SCREAMING_SNAKE_CASE , )
        lowercase__ : List[Any] = replicate(SCREAMING_SNAKE_CASE )
        lowercase__ : int = pipeline.prepare_inputs(SCREAMING_SNAKE_CASE )
        lowercase__ : List[str] = shard(SCREAMING_SNAKE_CASE )
        lowercase__ : List[str] = pipeline(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , jit=SCREAMING_SNAKE_CASE ).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        lowercase__ : List[Any] = images[2, 0, 256, 10:17, 1]
        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice ).max() < 1E-2
81
import unittest
from queue import Empty
from threading import Thread

from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device

from ..test_modeling_common import ids_tensor

if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM


@require_torch
class snake_case__(unittest.TestCase ):
    """Tests for TextStreamer / TextIteratorStreamer against greedy decoding.

    NOTE(review): this chunk is machine-obfuscated — assignment targets were
    rewritten to `lowercase__` while later reads keep the original names
    (`tokenizer`, `model`, `input_ids`, `streamer`, ...), so it will not run
    as-is.
    """

    def snake_case ( self : int ):
        # TextStreamer must print to stdout exactly the text that greedy
        # generation would produce (streamer's trailing "\n" stripped).
        lowercase__ : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        lowercase__ : Dict = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : str = -1
        lowercase__ : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
        lowercase__ : Dict = tokenizer.decode(greedy_ids[0] )
        with CaptureStdout() as cs:
            lowercase__ : str = TextStreamer(SCREAMING_SNAKE_CASE )
            model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        lowercase__ : int = cs.out[:-1]
        self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    def snake_case ( self : Optional[int] ):
        # TextIteratorStreamer: generation runs in a worker thread while the
        # main thread consumes the streamed chunks; the concatenation must
        # equal the greedy-decoded text.
        lowercase__ : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        lowercase__ : str = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[Any] = -1
        lowercase__ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[int] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
        lowercase__ : int = tokenizer.decode(greedy_ids[0] )
        lowercase__ : Union[str, Any] = TextIteratorStreamer(SCREAMING_SNAKE_CASE )
        lowercase__ : Dict = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        lowercase__ : Optional[int] = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE )
        thread.start()
        lowercase__ : List[Any] = ""
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    def snake_case ( self : Union[str, Any] ):
        # skip_prompt=True: only the newly generated suffix (prompt removed)
        # should reach stdout.
        lowercase__ : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        lowercase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : Union[str, Any] = -1
        lowercase__ : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
        lowercase__ : Any = greedy_ids[:, input_ids.shape[1] :]
        lowercase__ : Any = tokenizer.decode(new_greedy_ids[0] )
        with CaptureStdout() as cs:
            lowercase__ : str = TextStreamer(SCREAMING_SNAKE_CASE , skip_prompt=SCREAMING_SNAKE_CASE )
            model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        lowercase__ : Optional[Any] = cs.out[:-1]
        self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    def snake_case ( self : Any ):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        lowercase__ : List[str] = AutoTokenizer.from_pretrained("distilgpt2" )
        lowercase__ : Tuple = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : List[Any] = -1
        lowercase__ : List[Any] = torch.ones((1, 5) , device=SCREAMING_SNAKE_CASE ).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            lowercase__ : Dict = TextStreamer(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
            model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=1 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        lowercase__ : List[Any] = cs.out[:-1]  # Remove the final "\n"
        lowercase__ : Optional[int] = tokenizer(SCREAMING_SNAKE_CASE , return_tensors="pt" )
        self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )

    def snake_case ( self : Optional[int] ):
        # A very small timeout on TextIteratorStreamer must raise (Empty)
        # when the consumer outpaces generation.
        lowercase__ : Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        lowercase__ : List[str] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : int = -1
        lowercase__ : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : List[Any] = TextIteratorStreamer(SCREAMING_SNAKE_CASE , timeout=0.001 )
        lowercase__ : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        lowercase__ : Any = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE )
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(SCREAMING_SNAKE_CASE ):
            lowercase__ : List[str] = ""
            for new_text in streamer:
                streamer_text += new_text
81
1
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess

from packaging.version import Version, parse

from accelerate.commands.config.config_args import default_config_file, load_config_from_file


lowerCAmelCase__ = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''


def tpu_command_parser(subparsers=None):
    """Build the argparse parser for the `accelerate tpu-config` command.

    Args:
        subparsers: an argparse sub-parser collection to attach to, or None
            to build a standalone parser.

    Returns:
        The configured ``argparse.ArgumentParser``.
    """
    # Fix: the obfuscated original assigned every parser to a throwaway name
    # and passed the wrong objects to `description=` / `set_defaults`.
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=lowerCAmelCase__)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=lowerCAmelCase__)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        # When used as a sub-command, route parsed args to the launcher.
        parser.set_defaults(func=tpu_command_launcher)
    return parser


def tpu_command_launcher(args):
    """Assemble and run the `gcloud ... tpu-vm ssh` setup command.

    Fills unset CLI options from the accelerate config file, builds the
    remote command string (optionally installing accelerate first), and
    executes it on every TPU worker via gcloud.

    Raises:
        ValueError: if neither ``--command`` nor ``--command_file`` is given.
    """
    defaults = None

    # Get the default from the config file if it exists.
    # Fix: the obfuscated original tested `os.path.isfile(args)` instead of
    # the default config file path, and never bound `defaults`.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone

    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    # Fix: the original tested `isinstance(parse(...), args)` — the second
    # argument must be the `Version` class, not the args namespace.
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        # Dry run: show what would be executed and stop.
        print(f"""Running {" ".join(cmd)}""")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    """Standalone entry point: parse CLI args and launch the setup."""
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)


# Backward-compat: the (machine-mangled) original exposed its entry point
# under this name; keep the module attribute pointing at `main`.
__lowerCamelCase = main
81
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block


# NOTE(review): this whole chunk is machine-obfuscated — class/base names were
# rewritten (`snake_case__`, `_UpperCamelCase`), parameters collapsed to
# duplicate `SCREAMING_SNAKE_CASE` names, and assignment targets to
# `lowercase__`, so it is not runnable as-is. Comments below document the
# intended VAE building blocks (encoder / decoder / vector quantizer /
# diagonal Gaussian posterior) as the code reads.
@dataclass
class snake_case__(_UpperCamelCase ):
    """Output dataclass for the decoder: holds the decoded sample tensor."""
    # obfuscated field (originally an annotated tensor attribute)
    lowercase_ = 42


class snake_case__(nn.Module ):
    """Convolutional VAE encoder: conv_in -> down blocks -> mid block -> norm/act/conv_out."""

    def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : Optional[int]=3 , SCREAMING_SNAKE_CASE : List[Any]=("DownEncoderBlock2D",) , SCREAMING_SNAKE_CASE : Dict=(64,) , SCREAMING_SNAKE_CASE : Optional[Any]=2 , SCREAMING_SNAKE_CASE : Optional[int]=32 , SCREAMING_SNAKE_CASE : List[str]="silu" , SCREAMING_SNAKE_CASE : str=True , ):
        super().__init__()
        lowercase__ : str = layers_per_block
        # input projection to the first block width
        lowercase__ : int = torch.nn.Convad(
            SCREAMING_SNAKE_CASE , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
        lowercase__ : Union[str, Any] = None
        lowercase__ : Optional[int] = nn.ModuleList([] )
        # down
        lowercase__ : Dict = block_out_channels[0]
        for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE ):
            lowercase__ : List[str] = output_channel
            lowercase__ : Dict = block_out_channels[i]
            # last block gets no downsampler
            lowercase__ : List[str] = i == len(SCREAMING_SNAKE_CASE ) - 1
            lowercase__ : Union[str, Any] = get_down_block(
                SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block , in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=SCREAMING_SNAKE_CASE , resnet_groups=SCREAMING_SNAKE_CASE , attention_head_dim=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , )
            self.down_blocks.append(SCREAMING_SNAKE_CASE )
        # mid
        lowercase__ : Optional[int] = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , )
        # out
        lowercase__ : int = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=SCREAMING_SNAKE_CASE , eps=1E-6 )
        lowercase__ : Union[str, Any] = nn.SiLU()
        # double the channels when outputting mean+logvar of a Gaussian
        lowercase__ : Tuple = 2 * out_channels if double_z else out_channels
        lowercase__ : Tuple = nn.Convad(block_out_channels[-1] , SCREAMING_SNAKE_CASE , 3 , padding=1 )
        lowercase__ : Tuple = False

    def snake_case ( self : int , SCREAMING_SNAKE_CASE : Tuple ):
        # Forward pass; optionally runs blocks under torch.utils.checkpoint
        # when training with gradient checkpointing enabled.
        lowercase__ : List[str] = x
        lowercase__ : Tuple = self.conv_in(SCREAMING_SNAKE_CASE )
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(SCREAMING_SNAKE_CASE : Union[str, Any] ):
                def custom_forward(*SCREAMING_SNAKE_CASE : Dict ):
                    return module(*SCREAMING_SNAKE_CASE )

                return custom_forward

            # down — torch >= 1.11 supports `use_reentrant`
            if is_torch_version(">=" , "1.11.0" ):
                for down_block in self.down_blocks:
                    lowercase__ : Union[str, Any] = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
                # middle
                lowercase__ : int = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
            else:
                for down_block in self.down_blocks:
                    lowercase__ : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
                # middle
                lowercase__ : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE )
        else:
            # down
            for down_block in self.down_blocks:
                lowercase__ : Any = down_block(SCREAMING_SNAKE_CASE )
            # middle
            lowercase__ : List[str] = self.mid_block(SCREAMING_SNAKE_CASE )
        # post-process
        lowercase__ : Union[str, Any] = self.conv_norm_out(SCREAMING_SNAKE_CASE )
        lowercase__ : List[Any] = self.conv_act(SCREAMING_SNAKE_CASE )
        lowercase__ : Any = self.conv_out(SCREAMING_SNAKE_CASE )
        return sample


class snake_case__(nn.Module ):
    """Convolutional VAE decoder: conv_in -> mid block -> up blocks -> norm/act/conv_out."""

    def __init__( self : Dict , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : Optional[int]=("UpDecoderBlock2D",) , SCREAMING_SNAKE_CASE : int=(64,) , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : int=32 , SCREAMING_SNAKE_CASE : str="silu" , SCREAMING_SNAKE_CASE : Any="group" , ):
        super().__init__()
        lowercase__ : List[str] = layers_per_block
        lowercase__ : int = nn.Convad(
            SCREAMING_SNAKE_CASE , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
        lowercase__ : Optional[Any] = None
        lowercase__ : Dict = nn.ModuleList([] )
        # "spatial" norm conditions on the latent; "group" does not
        lowercase__ : List[str] = in_channels if norm_type == "spatial" else None
        # mid
        lowercase__ : str = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , )
        # up
        lowercase__ : Tuple = list(reversed(SCREAMING_SNAKE_CASE ) )
        lowercase__ : Dict = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE ):
            lowercase__ : Tuple = output_channel
            lowercase__ : List[Any] = reversed_block_out_channels[i]
            lowercase__ : List[Any] = i == len(SCREAMING_SNAKE_CASE ) - 1
            lowercase__ : Dict = get_up_block(
                SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , prev_output_channel=SCREAMING_SNAKE_CASE , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , resnet_groups=SCREAMING_SNAKE_CASE , attention_head_dim=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , resnet_time_scale_shift=SCREAMING_SNAKE_CASE , )
            self.up_blocks.append(SCREAMING_SNAKE_CASE )
            lowercase__ : Optional[Any] = output_channel
        # out
        if norm_type == "spatial":
            lowercase__ : Any = SpatialNorm(block_out_channels[0] , SCREAMING_SNAKE_CASE )
        else:
            lowercase__ : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=SCREAMING_SNAKE_CASE , eps=1E-6 )
        lowercase__ : Union[str, Any] = nn.SiLU()
        lowercase__ : Any = nn.Convad(block_out_channels[0] , SCREAMING_SNAKE_CASE , 3 , padding=1 )
        lowercase__ : List[Any] = False

    def snake_case ( self : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str=None ):
        # Decode a latent (optionally conditioned on `latent_embeds` for
        # spatial norm); mirrors the encoder's checkpointing branches.
        lowercase__ : Tuple = z
        lowercase__ : List[str] = self.conv_in(SCREAMING_SNAKE_CASE )
        lowercase__ : List[Any] = next(iter(self.up_blocks.parameters() ) ).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(SCREAMING_SNAKE_CASE : List[str] ):
                def custom_forward(*SCREAMING_SNAKE_CASE : Optional[int] ):
                    return module(*SCREAMING_SNAKE_CASE )

                return custom_forward

            if is_torch_version(">=" , "1.11.0" ):
                # middle
                lowercase__ : List[str] = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
                lowercase__ : str = sample.to(SCREAMING_SNAKE_CASE )
                # up
                for up_block in self.up_blocks:
                    lowercase__ : List[Any] = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
            else:
                # middle
                lowercase__ : str = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
                lowercase__ : Tuple = sample.to(SCREAMING_SNAKE_CASE )
                # up
                for up_block in self.up_blocks:
                    lowercase__ : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        else:
            # middle
            lowercase__ : Optional[int] = self.mid_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
            lowercase__ : Optional[Any] = sample.to(SCREAMING_SNAKE_CASE )
            # up
            for up_block in self.up_blocks:
                lowercase__ : Optional[Any] = up_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        # post-process
        if latent_embeds is None:
            lowercase__ : Union[str, Any] = self.conv_norm_out(SCREAMING_SNAKE_CASE )
        else:
            lowercase__ : Dict = self.conv_norm_out(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        lowercase__ : Union[str, Any] = self.conv_act(SCREAMING_SNAKE_CASE )
        lowercase__ : Tuple = self.conv_out(SCREAMING_SNAKE_CASE )
        return sample


class snake_case__(nn.Module ):
    """VQ-VAE vector quantizer: nearest-codebook-entry lookup with an
    optional index remapping table and the straight-through estimator."""

    def __init__( self : Any , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : List[Any]="random" , SCREAMING_SNAKE_CASE : Union[str, Any]=False , SCREAMING_SNAKE_CASE : int=True ):
        super().__init__()
        lowercase__ : List[Any] = n_e
        lowercase__ : List[str] = vq_embed_dim
        lowercase__ : Optional[Any] = beta
        lowercase__ : List[str] = legacy
        # codebook, uniformly initialised in [-1/n_e, 1/n_e]
        lowercase__ : Tuple = nn.Embedding(self.n_e , self.vq_embed_dim )
        self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
        lowercase__ : Union[str, Any] = remap
        if self.remap is not None:
            self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
            lowercase__ : Tuple = self.used.shape[0]
            lowercase__ : Any = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                lowercase__ : Any = self.re_embed
                lowercase__ : Tuple = self.re_embed + 1
            print(
                f"""Remapping {self.n_e} indices to {self.re_embed} indices. """
                f"""Using {self.unknown_index} for unknown indices.""" )
        else:
            lowercase__ : str = n_e
        lowercase__ : Union[str, Any] = sane_index_shape

    def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict ):
        # Map raw codebook indices to positions in the `used` subset;
        # unknown indices are filled per `unknown_index` policy.
        lowercase__ : Any = inds.shape
        assert len(SCREAMING_SNAKE_CASE ) > 1
        lowercase__ : List[str] = inds.reshape(ishape[0] , -1 )
        lowercase__ : str = self.used.to(SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[int] = (inds[:, :, None] == used[None, None, ...]).long()
        lowercase__ : Dict = match.argmax(-1 )
        lowercase__ : Dict = match.sum(2 ) < 1
        if self.unknown_index == "random":
            lowercase__ : Optional[Any] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
        else:
            lowercase__ : List[Any] = self.unknown_index
        return new.reshape(SCREAMING_SNAKE_CASE )

    def snake_case ( self : int , SCREAMING_SNAKE_CASE : int ):
        # Inverse of the remapping: positions in `used` back to raw indices.
        lowercase__ : List[Any] = inds.shape
        assert len(SCREAMING_SNAKE_CASE ) > 1
        lowercase__ : Optional[int] = inds.reshape(ishape[0] , -1 )
        lowercase__ : str = self.used.to(SCREAMING_SNAKE_CASE )
        if self.re_embed > self.used.shape[0]:  # extra token
            lowercase__ : int = 0  # simply set to zero
        lowercase__ : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , SCREAMING_SNAKE_CASE )
        return back.reshape(SCREAMING_SNAKE_CASE )

    def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : List[Any] ):
        # Quantize z: nearest codebook entry, commitment loss, and
        # straight-through gradient copy.
        # reshape z -> (batch, height, width, channel) and flatten
        lowercase__ : Union[str, Any] = z.permute(0 , 2 , 3 , 1 ).contiguous()
        lowercase__ : Optional[Any] = z.view(-1 , self.vq_embed_dim )
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        lowercase__ : Optional[Any] = torch.argmin(torch.cdist(SCREAMING_SNAKE_CASE , self.embedding.weight ) , dim=1 )
        lowercase__ : List[str] = self.embedding(SCREAMING_SNAKE_CASE ).view(z.shape )
        lowercase__ : Dict = None
        lowercase__ : int = None
        # compute loss for embedding (beta weighting swaps sides in legacy mode)
        if not self.legacy:
            lowercase__ : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
        else:
            lowercase__ : List[str] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
        # preserve gradients
        lowercase__ : Union[str, Any] = z + (z_q - z).detach()
        # reshape back to match original input shape
        lowercase__ : Optional[int] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        if self.remap is not None:
            lowercase__ : Dict = min_encoding_indices.reshape(z.shape[0] , -1 )  # add batch axis
            lowercase__ : int = self.remap_to_used(SCREAMING_SNAKE_CASE )
            lowercase__ : List[str] = min_encoding_indices.reshape(-1 , 1 )  # flatten
        if self.sane_index_shape:
            lowercase__ : List[str] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
        # Look up codebook entries for given indices and restore the
        # (batch, channel, height, width) layout when a shape is given.
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            lowercase__ : Union[str, Any] = indices.reshape(shape[0] , -1 )  # add batch axis
            lowercase__ : Union[str, Any] = self.unmap_to_all(SCREAMING_SNAKE_CASE )
            lowercase__ : Optional[int] = indices.reshape(-1 )  # flatten again
        # get quantized latent vectors
        lowercase__ : List[Any] = self.embedding(SCREAMING_SNAKE_CASE )
        if shape is not None:
            lowercase__ : Any = z_q.view(SCREAMING_SNAKE_CASE )
            # reshape back to match original input shape
            lowercase__ : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        return z_q


class snake_case__(_UpperCamelCase ):
    """Diagonal Gaussian posterior parameterised by concatenated mean/logvar
    channels; supports sampling, KL divergence, NLL and the mode."""

    def __init__( self : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : str=False ):
        lowercase__ : Dict = parameters
        # split channel dim into mean and log-variance halves
        lowercase__ , lowercase__ : Optional[int] = torch.chunk(SCREAMING_SNAKE_CASE , 2 , dim=1 )
        # clamp logvar for numerical stability
        lowercase__ : Optional[Any] = torch.clamp(self.logvar , -30.0 , 20.0 )
        lowercase__ : Optional[int] = deterministic
        lowercase__ : Tuple = torch.exp(0.5 * self.logvar )
        lowercase__ : Optional[int] = torch.exp(self.logvar )
        if self.deterministic:
            # zero std/var => sampling collapses to the mean
            lowercase__ : Any = torch.zeros_like(
                self.mean , device=self.parameters.device , dtype=self.parameters.dtype )

    def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None ):
        # Reparameterised sample: mean + std * eps.
        # make sure sample is on the same device as the parameters and has same dtype
        lowercase__ : Tuple = randn_tensor(
            self.mean.shape , generator=SCREAMING_SNAKE_CASE , device=self.parameters.device , dtype=self.parameters.dtype )
        lowercase__ : str = self.mean + self.std * sample
        return x

    def snake_case ( self : str , SCREAMING_SNAKE_CASE : List[str]=None ):
        # KL divergence to a standard normal (other=None) or to another
        # diagonal Gaussian, summed over CHW dims.
        if self.deterministic:
            return torch.Tensor([0.0] )
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean , 2 ) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar , dim=[1, 2, 3] , )

    def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict=[1, 2, 3] ):
        # Negative log-likelihood of `sample` under this Gaussian, summed
        # over the given dims.
        if self.deterministic:
            return torch.Tensor([0.0] )
        lowercase__ : Any = np.log(2.0 * np.pi )
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=SCREAMING_SNAKE_CASE )

    def snake_case ( self : Tuple ):
        # Mode of a Gaussian is its mean.
        return self.mean
1
import inspect
from typing import List, Optional, Tuple, Union

import numpy as np
import PIL
import torch
import torch.utils.checkpoint

from ...models import UNetaDModel, VQModel
from ...schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


def __lowerCamelCase(lowerCamelCase__):
    """Resize a PIL image down to a multiple of 32 and convert it to a NCHW float tensor in [-1, 1].

    Fixes the original block, whose body referenced the undefined names ``image``/``w``/``h``
    (the parameter had been renamed without updating the use sites).
    """
    image = lowerCamelCase__
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # round down to an integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    # np.floataa does not exist; the intended dtype is float32.
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


# Alias so the helper can be called from inside the class body below: a bare
# `__lowerCamelCase` reference there would be name-mangled to a nonexistent attribute.
preprocess = __lowerCamelCase


class snake_case__(DiffusionPipeline):
    """Latent-diffusion super-resolution pipeline: denoises latents conditioned on a low-res image.

    The original block inherited from the undefined ``_UpperCamelCase``; ``DiffusionPipeline``
    (imported above and required for ``register_modules``/``progress_bar``/``numpy_to_pil``)
    is the grounded base class.
    """

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNetaDModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        # The original signature repeated the name SCREAMING_SNAKE_CASE for every
        # parameter, which is a SyntaxError; names restored from the
        # register_modules keyword arguments below.
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Run super-resolution on *image* and return upscaled images.

        Raises:
            ValueError: if *image* is neither a PIL image nor a tensor.
        """
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}""")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
81
import gc
import unittest

import numpy as np
import torch

from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
    CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin

enable_full_determinism()


class snake_case__(PipelineTesterMixin, unittest.TestCase):
    """Fast (CPU) tests for DiTPipeline.

    The original block assigned every class attribute to the same name
    ``lowercase_`` (each assignment clobbering the previous one) and inherited
    from the undefined ``_UpperCamelCase``; the attribute names below are the
    ones ``PipelineTesterMixin`` reads (``required_optional_params`` is grounded
    by the explicit ``PipelineTesterMixin.required_optional_params`` reference).
    """

    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    # NOTE(review): the original had a fifth `lowercase_ = False`; best-guess
    # restoration of the flag name — confirm against the upstream test file.
    test_cpu_offload = False

    def get_dummy_components(self):
        # Name grounded by the `self.get_dummy_components()` call site below.
        torch.manual_seed(0)
        transformer = TransformeraDModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1_000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        # Name grounded by the `self.get_dummy_inputs(...)` call site below.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1E-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1E-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)


@require_torch_gpu
@slow
class snake_case__(unittest.TestCase):
    """Slow (GPU) integration tests against the published DiT checkpoints."""

    def tearDown(self):
        # Name grounded by the `super().tearDown()` call; must override unittest's hook.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"""
            )
            assert np.abs((expected_image - image).max()) < 1E-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        # The original dropped this attribute assignment during renaming, making
        # the scheduler swap a no-op.
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"""/dit/{word}_512.npy"""
            )
            assert np.abs((expected_image - image).max()) < 1E-1
81
1
lowerCAmelCase__ = ''' # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git ''' lowerCAmelCase__ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}] lowerCAmelCase__ = { '''{processor_class}''': '''FakeProcessorClass''', '''{model_class}''': '''FakeModelClass''', '''{object_class}''': '''FakeObjectClass''', }
81
import torch

from diffusers import CMStochasticIterativeScheduler

from .test_schedulers import SchedulerCommonTest


class snake_case__(SchedulerCommonTest):
    """Tests for CMStochasticIterativeScheduler.

    The original block inherited from the undefined ``_UpperCamelCase`` and had
    every local renamed to ``lowercase__`` while use sites kept the real names;
    ``scheduler_classes`` is grounded by the ``self.scheduler_classes[0]`` reads
    and ``get_scheduler_config`` by its explicit call sites below.
    """

    scheduler_classes = (CMStochasticIterativeScheduler,)
    # NOTE(review): restored attribute name for the bare `1_0` — confirm upstream.
    num_inference_steps = 1_0

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config

    def test_step_shape(self):
        # Stepping at two different timesteps must preserve the sample shape.
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)
        scheduler.set_timesteps(num_inference_steps)

        timestep_a = scheduler.timesteps[0]
        timestep_a_next = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_a = scheduler.step(residual, timestep_a, sample).prev_sample
        output_a_next = scheduler.step(residual, timestep_a_next, sample).prev_sample

        self.assertEqual(output_a.shape, sample.shape)
        self.assertEqual(output_a.shape, output_a_next.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7_614) < 1E-2
        assert abs(result_mean.item() - 0.2_510) < 1E-3

    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6_357) < 1E-2
        assert abs(result_mean.item() - 0.4_527) < 1E-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        # Original passed the undefined SCREAMING_SNAKE_CASE as the exception
        # class; ValueError is the grounded restoration.
        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        # Fixed: the original msg string lacked the `f` prefix (so the
        # placeholder never interpolated) and carried a stray trailing brace.
        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
81
1
import doctest from collections import deque import numpy as np class snake_case__: """simple docstring""" def __init__( self : str ): lowercase__ : Tuple = [2, 1, 2, -1] lowercase__ : List[str] = [1, 2, 3, 4] def snake_case ( self : str ): lowercase__ : int = len(self.first_signal ) lowercase__ : int = len(self.second_signal ) lowercase__ : Optional[Any] = max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # create a zero matrix of max_length x max_length lowercase__ : Tuple = [[0] * max_length for i in range(SCREAMING_SNAKE_CASE )] # fills the smaller signal with zeros to make both signals of same length if length_first_signal < length_second_signal: self.first_signal += [0] * (max_length - length_first_signal) elif length_first_signal > length_second_signal: self.second_signal += [0] * (max_length - length_second_signal) for i in range(SCREAMING_SNAKE_CASE ): lowercase__ : Optional[int] = deque(self.second_signal ) rotated_signal.rotate(SCREAMING_SNAKE_CASE ) for j, item in enumerate(SCREAMING_SNAKE_CASE ): matrix[i][j] += item # multiply the matrix with the first signal lowercase__ : List[str] = np.matmul(np.transpose(SCREAMING_SNAKE_CASE ) , np.transpose(self.first_signal ) ) # rounding-off to two decimal places return [round(SCREAMING_SNAKE_CASE , 2 ) for i in final_signal] if __name__ == "__main__": doctest.testmod()
81
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim

from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax
import jax.numpy as jnp

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
    CommonSchedulerState,
    FlaxKarrasDiffusionSchedulers,
    FlaxSchedulerMixin,
    FlaxSchedulerOutput,
    add_noise_common,
    get_velocity_common,
)


# The original block named all three classes `snake_case__` (each clobbering the
# previous) while its own bodies referenced `DDPMSchedulerState.create` and
# `FlaxDDPMSchedulerOutput(...)` — the real names are restored from those call
# sites. Field/parameter names are restored from the keyword-argument use sites.


@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Flax DDPM (ancestral) scheduler; state lives in an explicit DDPMSchedulerState."""

    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1_000,
        beta_start: float = 0.0_001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,  # jnp.floataa does not exist; float32 restored
    ):
        # Original lost this attribute assignment (`lowercase__ = dtype`).
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )

    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        return sample

    def set_timesteps(
        self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )

    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1E-2_0)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1E-2_0))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
                " for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
81
1
import torch def __lowerCamelCase ( ): """simple docstring""" if torch.cuda.is_available(): lowercase__ : List[Any] = torch.cuda.device_count() else: lowercase__ : Any = 0 print(F"""Successfully ran on {num_gpus} GPUs""" ) if __name__ == "__main__": main()
81
from typing import Callable, List, Optional, Union

import PIL
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPSegForImageSegmentation,
    CLIPSegProcessor,
    CLIPTextModel,
    CLIPTokenizer,
)

from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging

lowerCAmelCase__ = logging.get_logger(__name__)  # pylint: disable=invalid-name


class snake_case__(DiffusionPipeline):
    """Text-guided inpainting: CLIPSeg segments the region named by *text*, then
    a Stable Diffusion inpainting pipeline fills it from *prompt*.

    The original inherited from the undefined ``_UpperCamelCase`` and repeated
    the parameter name SCREAMING_SNAKE_CASE (SyntaxError); parameter names are
    restored from the ``register_modules`` keyword arguments.
    """

    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNetaDConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
                f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            # The renaming had dropped the dict update and the attribute write,
            # so the deprecation fix-up was a no-op; restored.
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            lowerCAmelCase__.warning(
                f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        """Enable sliced attention computation to trade speed for memory."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # Name grounded by the `self.enable_attention_slicing(...)` call.
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        """Offload submodules to CPU via accelerate, loading them to GPU on demand."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Segment the region described by *text* in *image* and inpaint it with *prompt*."""
        # 1. segmentation mask from CLIPSeg
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # 2. Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
81
1
import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging lowerCAmelCase__ = logging.get_logger(__name__) class snake_case__(_UpperCamelCase ): """simple docstring""" lowercase_ = ["""input_features"""] def __init__( self : Dict , SCREAMING_SNAKE_CASE : int=80 , SCREAMING_SNAKE_CASE : Union[str, Any]=16_000 , SCREAMING_SNAKE_CASE : Optional[int]=160 , SCREAMING_SNAKE_CASE : Optional[Any]=30 , SCREAMING_SNAKE_CASE : Dict=400 , SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , SCREAMING_SNAKE_CASE : int=False , **SCREAMING_SNAKE_CASE : Tuple , ): super().__init__( feature_size=SCREAMING_SNAKE_CASE , sampling_rate=SCREAMING_SNAKE_CASE , padding_value=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) lowercase__ : List[str] = n_fft lowercase__ : Any = hop_length lowercase__ : Dict = chunk_length lowercase__ : Union[str, Any] = chunk_length * sampling_rate lowercase__ : Any = self.n_samples // hop_length lowercase__ : List[Any] = sampling_rate lowercase__ : List[str] = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=SCREAMING_SNAKE_CASE , norm="slaney" , mel_scale="slaney" , ) def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : np.array ): lowercase__ : Tuple = spectrogram( SCREAMING_SNAKE_CASE , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , ) lowercase__ : Tuple = log_spec[:, :-1] lowercase__ : Any = np.maximum(SCREAMING_SNAKE_CASE , log_spec.max() - 8.0 ) lowercase__ : Optional[Any] = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from 
transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def snake_case ( SCREAMING_SNAKE_CASE : List[np.ndarray] , SCREAMING_SNAKE_CASE : List[np.ndarray] , SCREAMING_SNAKE_CASE : float = 0.0 ): if attention_mask is not None: lowercase__ : List[str] = np.array(SCREAMING_SNAKE_CASE , np.intaa ) lowercase__ : List[str] = [] for vector, length in zip(SCREAMING_SNAKE_CASE , attention_mask.sum(-1 ) ): lowercase__ : List[Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: lowercase__ : List[Any] = padding_value normed_input_values.append(SCREAMING_SNAKE_CASE ) else: lowercase__ : Any = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def __call__( self : str , SCREAMING_SNAKE_CASE : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[str] = "max_length" , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[bool] = None , **SCREAMING_SNAKE_CASE : List[Any] , ): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a""" f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input""" f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." 
) lowercase__ : str = isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) lowercase__ : Any = is_batched_numpy or ( isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowercase__ : Union[str, Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ): lowercase__ : Tuple = np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowercase__ : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowercase__ : Optional[Any] = [np.asarray([raw_speech] ).T] lowercase__ : List[str] = BatchFeature({"input_features": raw_speech} ) # convert into correct format for padding lowercase__ : List[Any] = self.pad( SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , max_length=max_length if max_length else self.n_samples , truncation=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_attention_mask=return_attention_mask or do_normalize , ) # zero-mean and unit-variance normalization if do_normalize: lowercase__ : Dict = self.zero_mean_unit_var_norm( padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , ) lowercase__ : Dict = np.stack(padded_inputs["input_features"] , axis=0 ) # make sure list is in array format lowercase__ : Tuple = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 ) lowercase__ : str = [self._np_extract_fbank_features(SCREAMING_SNAKE_CASE ) for waveform in input_features[0]] if isinstance(input_features[0] , SCREAMING_SNAKE_CASE ): lowercase__ : Optional[int] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for 
feature in input_features] else: lowercase__ : List[Any] = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) lowercase__ : Optional[int] = padded_inputs["attention_mask"][:, :: self.hop_length] if return_tensors is not None: lowercase__ : str = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE ) return padded_inputs def snake_case ( self : Dict ): lowercase__ : Dict = copy.deepcopy(self.__dict__ ) lowercase__ : int = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
81
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" lowercase__ : Dict = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2] lowercase__ : str = True if "large" in model_name or "huge" in model_name else False lowercase__ : Optional[Any] = True if "large" in model_name or "huge" in model_name else False lowercase__ : List[str] = True if "large" in model_name or "huge" in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: lowercase__ : int = [3, 3, 3, 3] lowercase__ : Tuple = [5, 5, 5, 5] elif "fl4" in model_name: lowercase__ : Optional[Any] = [4, 4, 4, 4] lowercase__ : Optional[Any] = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: lowercase__ : Union[str, Any] = [3, 3, 3, 3] if "lrf" in model_name: lowercase__ : Union[str, Any] = [3, 3, 3, 3] else: lowercase__ : Tuple = [2, 2, 2, 2] if "tiny" in model_name: lowercase__ : Optional[Any] = 96 elif "small" in model_name: lowercase__ : List[str] = 96 elif "base" in model_name: lowercase__ : str = 128 elif "large" in model_name: lowercase__ : Any = 192 elif "xlarge" in model_name: lowercase__ : str = 256 elif "huge" in model_name: lowercase__ : List[str] = 352 # set label information lowercase__ : Tuple = "huggingface/label-files" if "large" in model_name or "huge" in model_name: lowercase__ : List[Any] = "imagenet-22k-id2label.json" else: lowercase__ : Optional[int] = "imagenet-1k-id2label.json" lowercase__ : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) ) lowercase__ : 
Optional[int] = {int(lowerCamelCase__ ): v for k, v in idalabel.items()} lowercase__ : int = {v: k for k, v in idalabel.items()} lowercase__ : str = FocalNetConfig( embed_dim=lowerCamelCase__ , depths=lowerCamelCase__ , focal_levels=lowerCamelCase__ , focal_windows=lowerCamelCase__ , use_conv_embed=lowerCamelCase__ , idalabel=lowerCamelCase__ , labelaid=lowerCamelCase__ , use_post_layernorm=lowerCamelCase__ , use_layerscale=lowerCamelCase__ , ) return config def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" if "patch_embed.proj" in name: lowercase__ : int = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: lowercase__ : Dict = name.replace("patch_embed.norm" , "embeddings.norm" ) if "layers" in name: lowercase__ : List[str] = "encoder." + name if "encoder.layers" in name: lowercase__ : Optional[Any] = name.replace("encoder.layers" , "encoder.stages" ) if "downsample.proj" in name: lowercase__ : Optional[Any] = name.replace("downsample.proj" , "downsample.projection" ) if "blocks" in name: lowercase__ : List[str] = name.replace("blocks" , "layers" ) if "modulation.f.weight" in name or "modulation.f.bias" in name: lowercase__ : Any = name.replace("modulation.f" , "modulation.projection_in" ) if "modulation.h.weight" in name or "modulation.h.bias" in name: lowercase__ : Optional[Any] = name.replace("modulation.h" , "modulation.projection_context" ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: lowercase__ : Optional[Any] = name.replace("modulation.proj" , "modulation.projection_out" ) if name == "norm.weight": lowercase__ : List[str] = "layernorm.weight" if name == "norm.bias": lowercase__ : List[Any] = "layernorm.bias" if "head" in name: lowercase__ : Optional[int] = name.replace("head" , "classifier" ) else: lowercase__ : Union[str, Any] = "focalnet." 
+ name return name def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ): """simple docstring""" lowercase__ : List[Any] = { "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth", "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth", "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth", "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth", "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth", "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth", "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth", "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth", "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth", "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth", } # fmt: on lowercase__ : Union[str, Any] = model_name_to_url[model_name] print("Checkpoint URL: " , lowerCamelCase__ ) lowercase__ : Optional[int] = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="cpu" )["model"] # rename keys for key in state_dict.copy().keys(): lowercase__ : Tuple = state_dict.pop(lowerCamelCase__ ) lowercase__ : List[str] = val lowercase__ : List[str] = get_focalnet_config(lowerCamelCase__ ) lowercase__ : Union[str, Any] = FocalNetForImageClassification(lowerCamelCase__ ) model.eval() # load state dict model.load_state_dict(lowerCamelCase__ ) # verify 
conversion lowercase__ : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" lowercase__ : int = BitImageProcessor( do_resize=lowerCamelCase__ , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase__ , crop_size=224 , do_normalize=lowerCamelCase__ , image_mean=lowerCamelCase__ , image_std=lowerCamelCase__ , ) lowercase__ : Tuple = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ) lowercase__ : Tuple = processor(images=lowerCamelCase__ , return_tensors="pt" ) lowercase__ : Any = transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) lowercase__ : int = image_transforms(lowerCamelCase__ ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , lowerCamelCase__ , atol=1e-4 ) lowercase__ : List[Any] = model(**lowerCamelCase__ ) lowercase__ : int = outputs.logits.argmax(-1 ).item() print("Predicted class:" , model.config.idalabel[predicted_class_idx] ) print("First values of logits:" , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": lowercase__ : Union[str, Any] = torch.tensor([0.2166, -0.4368, 0.2191] ) elif model_name == "focalnet-tiny-lrf": lowercase__ : Optional[int] = torch.tensor([1.1669, 0.0125, -0.1695] ) elif model_name == "focalnet-small": lowercase__ : int = torch.tensor([0.4917, -0.0430, 0.1341] ) elif model_name == "focalnet-small-lrf": lowercase__ : Tuple = torch.tensor([-0.2588, -0.5342, -0.2331] ) elif model_name == "focalnet-base": lowercase__ : str = torch.tensor([-0.1655, -0.4090, -0.1730] ) elif model_name == "focalnet-base-lrf": lowercase__ : Optional[Any] = torch.tensor([0.5306, -0.0483, -0.3928] ) assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) print("Looks ok!" 
) if pytorch_dump_folder_path is not None: print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(lowerCamelCase__ ) processor.save_pretrained(lowerCamelCase__ ) if push_to_hub: print(F"""Pushing model and processor of {model_name} to the hub...""" ) model.push_to_hub(F"""{model_name}""" ) processor.push_to_hub(F"""{model_name}""" ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''focalnet-tiny''', type=str, help='''Name of the FocalNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub.''', ) lowerCAmelCase__ = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
81
1
from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class snake_case__(_UpperCamelCase ): """simple docstring""" def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : NestedDataStructureLike[PathLike] , SCREAMING_SNAKE_CASE : Optional[NamedSplit] = None , SCREAMING_SNAKE_CASE : Optional[Features] = None , SCREAMING_SNAKE_CASE : str = None , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : Optional[int] = None , **SCREAMING_SNAKE_CASE : int , ): super().__init__( SCREAMING_SNAKE_CASE , split=SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , keep_in_memory=SCREAMING_SNAKE_CASE , streaming=SCREAMING_SNAKE_CASE , num_proc=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) lowercase__ : Tuple = path_or_paths if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else {self.split: path_or_paths} lowercase__ : int = Text( cache_dir=SCREAMING_SNAKE_CASE , data_files=SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) def snake_case ( self : Any ): # Build iterable dataset if self.streaming: lowercase__ : List[Any] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: lowercase__ : int = None lowercase__ : int = None lowercase__ : Union[str, Any] = None lowercase__ : List[str] = None self.builder.download_and_prepare( download_config=SCREAMING_SNAKE_CASE , download_mode=SCREAMING_SNAKE_CASE , verification_mode=SCREAMING_SNAKE_CASE , base_path=SCREAMING_SNAKE_CASE , num_proc=self.num_proc , ) lowercase__ : str = self.builder.as_dataset( split=self.split , verification_mode=SCREAMING_SNAKE_CASE , in_memory=self.keep_in_memory ) return dataset
81
from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''huggingface/informer-tourism-monthly''': ( '''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json''' ), # See all Informer models at https://huggingface.co/models?filter=informer } class snake_case__(_UpperCamelCase ): """simple docstring""" lowercase_ = """informer""" lowercase_ = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", """num_hidden_layers""": """encoder_layers""", } def __init__( self : int , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : str = "student_t" , SCREAMING_SNAKE_CASE : str = "nll" , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : List[int] = None , SCREAMING_SNAKE_CASE : Optional[Union[str, bool]] = "mean" , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : int = 64 , SCREAMING_SNAKE_CASE : int = 32 , SCREAMING_SNAKE_CASE : int = 32 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : str = "gelu" , SCREAMING_SNAKE_CASE : float = 0.05 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : int = 100 , SCREAMING_SNAKE_CASE : float = 0.02 , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : str = "prob" , SCREAMING_SNAKE_CASE : int = 5 , SCREAMING_SNAKE_CASE : bool = True , **SCREAMING_SNAKE_CASE : List[Any] , ): # time series 
specific configuration lowercase__ : Any = prediction_length lowercase__ : List[str] = context_length or prediction_length lowercase__ : Tuple = distribution_output lowercase__ : Union[str, Any] = loss lowercase__ : Union[str, Any] = input_size lowercase__ : List[str] = num_time_features lowercase__ : Optional[Any] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7] lowercase__ : List[str] = scaling lowercase__ : str = num_dynamic_real_features lowercase__ : Tuple = num_static_real_features lowercase__ : List[str] = num_static_categorical_features # set cardinality if cardinality and num_static_categorical_features > 0: if len(SCREAMING_SNAKE_CASE ) != num_static_categorical_features: raise ValueError( "The cardinality should be a list of the same length as `num_static_categorical_features`" ) lowercase__ : Dict = cardinality else: lowercase__ : Dict = [0] # set embedding_dimension if embedding_dimension and num_static_categorical_features > 0: if len(SCREAMING_SNAKE_CASE ) != num_static_categorical_features: raise ValueError( "The embedding dimension should be a list of the same length as `num_static_categorical_features`" ) lowercase__ : Union[str, Any] = embedding_dimension else: lowercase__ : Optional[int] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] lowercase__ : Dict = num_parallel_samples # Transformer architecture configuration lowercase__ : Tuple = input_size * len(self.lags_sequence ) + self._number_of_features lowercase__ : Optional[Any] = d_model lowercase__ : int = encoder_attention_heads lowercase__ : Tuple = decoder_attention_heads lowercase__ : List[Any] = encoder_ffn_dim lowercase__ : List[str] = decoder_ffn_dim lowercase__ : List[str] = encoder_layers lowercase__ : Tuple = decoder_layers lowercase__ : Union[str, Any] = dropout lowercase__ : List[Any] = attention_dropout lowercase__ : str = activation_dropout lowercase__ : int = encoder_layerdrop lowercase__ : Union[str, Any] = decoder_layerdrop lowercase__ : Tuple 
= activation_function lowercase__ : str = init_std lowercase__ : Tuple = use_cache # Informer lowercase__ : Union[str, Any] = attention_type lowercase__ : Union[str, Any] = sampling_factor lowercase__ : Tuple = distil super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @property def snake_case ( self : str ): return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
81
1
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ): """simple docstring""" lowercase_ = StableDiffusionSAGPipeline lowercase_ = TEXT_TO_IMAGE_PARAMS lowercase_ = TEXT_TO_IMAGE_BATCH_PARAMS lowercase_ = TEXT_TO_IMAGE_IMAGE_PARAMS lowercase_ = TEXT_TO_IMAGE_IMAGE_PARAMS lowercase_ = False def snake_case ( self : str ): torch.manual_seed(0 ) lowercase__ : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) lowercase__ : Any = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=SCREAMING_SNAKE_CASE , set_alpha_to_one=SCREAMING_SNAKE_CASE , ) torch.manual_seed(0 ) lowercase__ : Any = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) lowercase__ : List[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) lowercase__ : int = CLIPTextModel(SCREAMING_SNAKE_CASE ) lowercase__ : 
Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) lowercase__ : Optional[int] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[Any]=0 ): if str(SCREAMING_SNAKE_CASE ).startswith("mps" ): lowercase__ : int = torch.manual_seed(SCREAMING_SNAKE_CASE ) else: lowercase__ : List[str] = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE ) lowercase__ : Union[str, Any] = { "prompt": ".", "generator": generator, "num_inference_steps": 2, "guidance_scale": 1.0, "sag_scale": 1.0, "output_type": "numpy", } return inputs def snake_case ( self : Optional[Any] ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class snake_case__(unittest.TestCase ): """simple docstring""" def snake_case ( self : Optional[int] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case ( self : str ): lowercase__ : str = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" ) lowercase__ : int = sag_pipe.to(SCREAMING_SNAKE_CASE ) sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE ) lowercase__ : Tuple = "." 
lowercase__ : Tuple = torch.manual_seed(0 ) lowercase__ : Any = sag_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" ) lowercase__ : str = output.images lowercase__ : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowercase__ : List[Any] = np.array([0.1_568, 0.1_738, 0.1_695, 0.1_693, 0.1_507, 0.1_705, 0.1_547, 0.1_751, 0.1_949] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2 def snake_case ( self : str ): lowercase__ : Dict = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" ) lowercase__ : Tuple = sag_pipe.to(SCREAMING_SNAKE_CASE ) sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = "." lowercase__ : int = torch.manual_seed(0 ) lowercase__ : Optional[Any] = sag_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" ) lowercase__ : List[str] = output.images lowercase__ : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowercase__ : List[str] = np.array([0.3_459, 0.2_876, 0.2_537, 0.3_002, 0.2_671, 0.2_160, 0.3_026, 0.2_262, 0.2_371] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2 def snake_case ( self : Dict ): lowercase__ : Optional[int] = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" ) lowercase__ : List[Any] = sag_pipe.to(SCREAMING_SNAKE_CASE ) sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE ) lowercase__ : Tuple = "." lowercase__ : List[str] = torch.manual_seed(0 ) lowercase__ : Tuple = sag_pipe( [prompt] , width=768 , height=512 , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" , ) lowercase__ : int = output.images assert image.shape == (1, 512, 768, 3)
81
import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging lowerCAmelCase__ = logging.get_logger(__name__) logging.set_verbosity_info() def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" if "xprophetnet" in prophetnet_checkpoint_path: lowercase__ : int = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCamelCase__ ) lowercase__ , lowercase__ : Any = XLMProphetNetForConditionalGeneration.from_pretrained( lowerCamelCase__ , output_loading_info=lowerCamelCase__ ) else: lowercase__ : List[str] = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCamelCase__ ) lowercase__ , lowercase__ : Optional[int] = ProphetNetForConditionalGeneration.from_pretrained( lowerCamelCase__ , output_loading_info=lowerCamelCase__ ) lowercase__ : int = ["key_proj", "value_proj", "query_proj"] lowercase__ : str = { "self_attn": "ngram_self_attn", "cross_attn": "encoder_attn", "cross_attn_layer_norm": "encoder_attn_layer_norm", "feed_forward_layer_norm": "final_layer_norm", "feed_forward": "", "intermediate": "fc1", "output": "fc2", "key_proj": "k_proj", "query_proj": "q_proj", "value_proj": "v_proj", "word_embeddings": "embed_tokens", "embeddings_layer_norm": "emb_layer_norm", "relative_pos_embeddings": "relative_linear", "ngram_embeddings": "ngram_input_embed", "position_embeddings": "embed_positions", } for key in loading_info["missing_keys"]: lowercase__ : Union[str, Any] = key.split("." 
) if attributes[0] == "lm_head": lowercase__ : Tuple = prophet lowercase__ : Tuple = prophet_old else: lowercase__ : Tuple = prophet.prophetnet lowercase__ : List[str] = prophet_old.model lowercase__ : int = False for attribute in attributes: if attribute in mapping: lowercase__ : int = mapping[attribute] if not hasattr(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) > 0: lowercase__ : Dict = attribute elif hasattr(lowerCamelCase__ , lowerCamelCase__ ): lowercase__ : Optional[Any] = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" lowercase__ : Any = old_model.weight logger.info(F"""{attribute} is initialized.""" ) lowercase__ : str = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" lowercase__ : Tuple = old_model.bias logger.info(F"""{attribute} is initialized""" ) lowercase__ : str = True break elif attribute in special_keys and hasattr(lowerCamelCase__ , "in_proj_weight" ): lowercase__ : str = old_model.in_proj_weight.shape[0] // 3 lowercase__ : Any = getattr(lowerCamelCase__ , lowerCamelCase__ ) param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": lowercase__ : List[str] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) lowercase__ : str = nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": lowercase__ : List[str] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) lowercase__ : Any = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": lowercase__ : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) lowercase__ : Union[str, Any] = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) lowercase__ : Tuple = True break elif attribute == 
"position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings." lowercase__ : List[Any] = nn.Parameter(old_model.embed_positions.weight[:512, :] ) lowercase__ : Union[str, Any] = True break if attribute.isdigit(): lowercase__ : str = model[int(lowerCamelCase__ )] lowercase__ : Union[str, Any] = old_model[int(lowerCamelCase__ )] else: lowercase__ : int = getattr(lowerCamelCase__ , lowerCamelCase__ ) if old_attribute == "": lowercase__ : str = old_model else: if not hasattr(lowerCamelCase__ , lowerCamelCase__ ): raise ValueError(F"""{old_model} does not have {old_attribute}""" ) lowercase__ : int = getattr(lowerCamelCase__ , lowerCamelCase__ ) if not is_key_init: raise ValueError(F"""{key} was not correctly initialized!""" ) print(F"""Saving model to {pytorch_dump_folder_path}""" ) prophet.save_pretrained(lowerCamelCase__ ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCAmelCase__ = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
81
1
import pickle
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


# Path of the small SentencePiece model checked in as a test fixture.
lowerCAmelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''')


@require_sentencepiece
@require_tokenizers
class snake_case__(_UpperCamelCase , unittest.TestCase ):
    """Test suite for the XGLM tokenizer (slow and fast variants).

    NOTE(review): identifiers in this file look machine-renamed — values are
    assigned to ``lowercase__`` but read back under other names such as
    ``tokenizer``/``vocab_keys``. Confirm against the upstream test file
    before refactoring.
    """

    lowercase_ = XGLMTokenizer
    lowercase_ = XGLMTokenizerFast
    lowercase_ = True
    lowercase_ = True

    def snake_case ( self : List[str] ):
        # Build a tokenizer from the SentencePiece fixture and save it so the
        # shared tokenizer tests can reload it from ``self.tmpdirname``.
        super().setUp()

        # We have a SentencePiece fixture for testing
        lowercase__ : Optional[int] = XGLMTokenizer(SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE )
        tokenizer.save_pretrained(self.tmpdirname )

    def snake_case ( self : Tuple ):
        # "<pad>" must round-trip through token<->id conversion as id 1.
        lowercase__ : Optional[Any] = "<pad>"
        lowercase__ : Dict = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )

    def snake_case ( self : str ):
        # The first vocab entries are the special tokens; fixture vocab is 1008.
        lowercase__ : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , "<s>" )
        self.assertEqual(vocab_keys[1] , "<pad>" )
        self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 1_008 )

    def snake_case ( self : int ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1_008 )

    def snake_case ( self : str ):
        # End-to-end check: tokenize -> ids -> tokens, including accented
        # characters and digits that decode back to <unk>.
        lowercase__ : int = XGLMTokenizer(SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE )

        lowercase__ : List[Any] = tokenizer.tokenize("This is a test" )
        self.assertListEqual(SCREAMING_SNAKE_CASE , ["▁This", "▁is", "▁a", "▁t", "est"] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )

        lowercase__ : str = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            SCREAMING_SNAKE_CASE , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        lowercase__ : int = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE )
        self.assertListEqual(
            SCREAMING_SNAKE_CASE , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ] , )

        lowercase__ : List[Any] = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE )
        self.assertListEqual(
            SCREAMING_SNAKE_CASE , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )

    @cached_property
    def snake_case ( self : Optional[Any] ):
        # Real pretrained tokenizer used by the @slow integration tests below.
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M" )

    def snake_case ( self : Dict ):
        # The tokenizer must stay picklable even when built from a temp file.
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SCREAMING_SNAKE_CASE , f.name )
            lowercase__ : List[str] = XGLMTokenizer(f.name , keep_accents=SCREAMING_SNAKE_CASE )
            lowercase__ : Tuple = pickle.dumps(SCREAMING_SNAKE_CASE )
        pickle.loads(SCREAMING_SNAKE_CASE )

    def snake_case ( self : Optional[int] ):
        # Slow (Python) and fast (Rust) tokenizers must agree on tokens and ids.
        if not self.test_rust_tokenizer:
            return

        lowercase__ : int = self.get_tokenizer()
        lowercase__ : Optional[int] = self.get_rust_tokenizer()

        lowercase__ : Optional[int] = "I was born in 92000, and this is falsé."

        lowercase__ : List[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE )
        lowercase__ : Tuple = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE )
        self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

        lowercase__ : Union[str, Any] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
        lowercase__ : Dict = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
        self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

        lowercase__ : Union[str, Any] = self.get_rust_tokenizer()
        lowercase__ : List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE )
        lowercase__ : Tuple = rust_tokenizer.encode(SCREAMING_SNAKE_CASE )
        self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    @slow
    def snake_case ( self : Optional[int] ):
        # Known-good encoding of a short string with the real checkpoint.
        lowercase__ : str = "Hello World!"
        lowercase__ : List[str] = [2, 31_227, 4_447, 35]

        self.assertListEqual(SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE ) )

    @slow
    def snake_case ( self : List[str] ):
        # Long text with unusual punctuation plus nonsense words expected to
        # map to <unk>-like pieces.
        lowercase__ : int = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        lowercase__ : List[Any] = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE ) )

    @slow
    def snake_case ( self : Optional[int] ):
        # Golden input_ids / attention_mask for three sentences; verified by
        # the shared integration helper against facebook/xglm-564M.
        # fmt: off
        lowercase__ : Union[str, Any] = {
            "input_ids": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
            "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
        }  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE , model_name="facebook/xglm-564M" , padding=SCREAMING_SNAKE_CASE , )
81
import json
import os
import unittest

from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class snake_case__(_UpperCamelCase , unittest.TestCase ):
    """Test suite for the GPT-2 byte-level BPE tokenizer (slow and fast).

    NOTE(review): identifiers look machine-renamed — values assigned to
    ``lowercase__`` are read back under names such as ``tokenizer``/``tokens``.
    Confirm against the upstream test file before refactoring.
    """

    lowercase_ = GPTaTokenizer
    lowercase_ = GPTaTokenizerFast
    lowercase_ = True
    lowercase_ = {"""add_prefix_space""": True}
    lowercase_ = False

    def snake_case ( self : Any ):
        # Write a tiny vocab/merges pair to tmpdir so tokenizers can be built
        # offline for these tests.
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        lowercase__ : Union[str, Any] = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        lowercase__ : Optional[Any] = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
        lowercase__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        lowercase__ : List[str] = {"unk_token": "<unk>"}

        lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        lowercase__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(SCREAMING_SNAKE_CASE ) )

    def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : int ):
        # Build a slow tokenizer from the fixture files written in setUp.
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )

    def snake_case ( self : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
        # Build the fast (Rust) tokenizer from the same fixture files.
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )

    def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Dict ):
        # Input/output pair used by the common round-trip tests.
        lowercase__ : List[str] = "lower newer"
        lowercase__ : Optional[Any] = "lower newer"
        return input_text, output_text

    def snake_case ( self : Any ):
        # Tokenize with the tiny fixture vocab and check tokens and ids,
        # including the <unk> fallback.
        lowercase__ : Dict = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        lowercase__ : Dict = "lower newer"
        lowercase__ : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        lowercase__ : Optional[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
        self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

        lowercase__ : Any = tokens + [tokenizer.unk_token]
        lowercase__ : str = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )

    def snake_case ( self : Optional[Any] ):
        # Slow and fast tokenizers must produce identical tokens and ids.
        if not self.test_rust_tokenizer:
            return

        lowercase__ : Dict = self.get_tokenizer()
        lowercase__ : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE )

        lowercase__ : int = "lower newer"

        # Testing tokenization
        lowercase__ : str = tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
        lowercase__ : int = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE )
        self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

        # Testing conversion to ids without special tokens
        lowercase__ : Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
        lowercase__ : Dict = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
        self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

        # Testing conversion to ids with special tokens
        lowercase__ : List[str] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE )
        lowercase__ : List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[int] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE )
        self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

        # Testing the unknown token
        lowercase__ : List[Any] = tokens + [rust_tokenizer.unk_token]
        lowercase__ : Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )

    def snake_case ( self : str , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : int=15 ):
        # Encoding with padding requested but no pad token set must raise,
        # for every encode entry point, on both single and pair inputs.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                lowercase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )

                # Simple input
                lowercase__ : Dict = "This is a simple input"
                lowercase__ : List[str] = ["This is a simple input 1", "This is a simple input 2"]
                lowercase__ : Union[str, Any] = ("This is a simple input", "This is a pair")
                lowercase__ : Optional[int] = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )

                # Simple input
                self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )

                # Simple input
                self.assertRaises(
                    SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , )

                # Pair input
                self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )

                # Pair input
                self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )

                # Pair input
                self.assertRaises(
                    SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , )

    def snake_case ( self : Any ):
        # With a pad token set on the slow tokenizer, verify both max_length
        # and automatic (longest) padding for single and pair inputs.
        lowercase__ : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )

        # Simple input
        lowercase__ : Optional[int] = "This is a simple input"
        lowercase__ : List[str] = ["This is a simple input looooooooong", "This is a simple input"]
        lowercase__ : List[Any] = ("This is a simple input", "This is a pair")
        lowercase__ : Optional[Any] = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        lowercase__ : Any = tokenizer.pad_token_id

        lowercase__ : Dict = tokenizer(SCREAMING_SNAKE_CASE , padding="max_length" , max_length=30 , return_tensors="np" )
        lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="np" )
        lowercase__ : List[str] = tokenizer(*SCREAMING_SNAKE_CASE , padding="max_length" , max_length=60 , return_tensors="np" )
        lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="np" )

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
        self.assertTrue(pad_token_id in out_s["input_ids"] )
        self.assertTrue(0 in out_s["attention_mask"] )

        # s2
        # test automatic padding
        self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
        self.assertFalse(0 in out_sa["attention_mask"][0] )
        # short slice does have padding
        self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
        self.assertTrue(0 in out_sa["attention_mask"][1] )

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
        self.assertTrue(pad_token_id in out_p["input_ids"] )
        self.assertTrue(0 in out_p["attention_mask"] )

        # p2
        # test automatic padding pair
        self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
        self.assertFalse(0 in out_pa["attention_mask"][0] )
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
        self.assertTrue(0 in out_pa["attention_mask"][1] )

    def snake_case ( self : str ):
        # A custom bos token with add_bos_token=True must be prepended on
        # encode and survive decode.
        lowercase__ : List[str] = "$$$"
        lowercase__ : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE )

        lowercase__ : Optional[int] = "This is a simple input"
        lowercase__ : Dict = ["This is a simple input 1", "This is a simple input 2"]

        lowercase__ : Optional[int] = tokenizer.bos_token_id

        lowercase__ : List[Any] = tokenizer(SCREAMING_SNAKE_CASE )
        lowercase__ : int = tokenizer(SCREAMING_SNAKE_CASE )

        self.assertEqual(out_s.input_ids[0] , SCREAMING_SNAKE_CASE )
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )

        lowercase__ : List[Any] = tokenizer.decode(out_s.input_ids )
        lowercase__ : List[str] = tokenizer.batch_decode(out_sa.input_ids )

        self.assertEqual(decode_s.split()[0] , SCREAMING_SNAKE_CASE )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )

    def snake_case ( self : Optional[int] ):
        pass

    def snake_case ( self : Tuple ):
        # Stripping the special-tokens mask from an encode_plus result must
        # recover the plain concatenated encoding.
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        lowercase__ : int = [self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE )]

        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                lowercase__ : str = "Encode this."
                lowercase__ : List[Any] = "This one too please."
                lowercase__ : Dict = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
                encoded_sequence += tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )

                lowercase__ : Dict = tokenizer.encode_plus(
                    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_special_tokens_mask=SCREAMING_SNAKE_CASE , )
                lowercase__ : Tuple = encoded_sequence_dict["input_ids"]
                lowercase__ : int = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) )

                lowercase__ : List[str] = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(SCREAMING_SNAKE_CASE )
                ]
                lowercase__ : Any = [x for x in filtered_sequence if x is not None]
                self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )


@require_tokenizers
class snake_case__(unittest.TestCase ):
    """Regression tests for OPT's GPT-2-style tokenizer (slow/fast parity)."""

    def snake_case ( self : Union[str, Any] ):
        # Fast tokenizer converted from slow must encode consistently before
        # and after a save/reload round trip.
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[Any] = "A photo of a cat"
        lowercase__ : Tuple = tokenizer.encode(
            SCREAMING_SNAKE_CASE , )
        self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
        tokenizer.save_pretrained("test_opt" )

        lowercase__ : int = AutoTokenizer.from_pretrained("./test_opt" )
        lowercase__ : Dict = tokenizer.encode(
            SCREAMING_SNAKE_CASE , )
        self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )

    def snake_case ( self : Union[str, Any] ):
        # The slow tokenizer must produce the same ids as the fast one above.
        lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=SCREAMING_SNAKE_CASE )
        lowercase__ : int = "A photo of a cat"
        lowercase__ : Tuple = tokenizer.encode(
            SCREAMING_SNAKE_CASE , )
        # Same as above
        self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )

    @unittest.skip("This test is failing because of a bug in the fast tokenizer" )
    def snake_case ( self : Tuple ):
        # Changing the bos token should be preserved across save/reload of the
        # fast tokenizer (currently skipped; see decorator).
        lowercase__ : str = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[Any] = "bos"
        lowercase__ : List[Any] = tokenizer.get_vocab()["bos"]

        lowercase__ : Optional[Any] = "A photo of a cat"
        lowercase__ : Union[str, Any] = tokenizer.encode(
            SCREAMING_SNAKE_CASE , )
        # We changed the bos token
        self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
        tokenizer.save_pretrained("./tok" )
        lowercase__ : Any = AutoTokenizer.from_pretrained("./tok" )
        self.assertTrue(tokenizer.is_fast )
        lowercase__ : Tuple = tokenizer.encode(
            SCREAMING_SNAKE_CASE , )
        self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
81
1
import warnings

from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum

from ...processing_utils import ProcessorMixin


if is_torch_available():
    import torch


class snake_case__(_UpperCamelCase ):
    """Decoding strategies supported by the MGP-STR processor."""

    lowercase_ = """char"""
    lowercase_ = """bpe"""
    lowercase_ = """wp"""


# All decode types accepted by the batch-decoding helper below.
lowerCAmelCase__ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class snake_case__(_UpperCamelCase ):
    """Processor pairing a ViT image processor with three tokenizers
    (character, BPE and WordPiece) for MGP-STR scene-text recognition.

    NOTE(review): identifiers look machine-renamed — values assigned to
    ``lowercase__`` are read back under names like ``inputs``/``encodings``.
    Confirm against the upstream module before refactoring.
    """

    lowercase_ = ["""image_processor""", """char_tokenizer"""]
    lowercase_ = """ViTImageProcessor"""
    lowercase_ = """MgpstrTokenizer"""

    def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : int=None , **SCREAMING_SNAKE_CASE : str ):
        # Accept the deprecated `feature_extractor` kwarg as an alias for
        # `image_processor`; both an image processor and a tokenizer are
        # required.
        lowercase__ : Tuple = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , SCREAMING_SNAKE_CASE , )
            lowercase__ : List[str] = kwargs.pop("feature_extractor" )

        lowercase__ : Optional[Any] = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )

        # Three decoders: the task tokenizer plus stock GPT-2 (BPE) and
        # BERT (WordPiece) tokenizers fetched from the Hub.
        lowercase__ : Tuple = tokenizer
        lowercase__ : Union[str, Any] = AutoTokenizer.from_pretrained("gpt2" )
        lowercase__ : List[Any] = AutoTokenizer.from_pretrained("bert-base-uncased" )

        super().__init__(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    def __call__( self : Any , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : str=None , **SCREAMING_SNAKE_CASE : int ):
        # Route images to the image processor and text to the character
        # tokenizer; when both are given, attach the text ids as labels on the
        # pixel inputs.
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process." )

        if images is not None:
            lowercase__ : Any = self.image_processor(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
        if text is not None:
            lowercase__ : List[str] = self.char_tokenizer(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            lowercase__ : Any = encodings["input_ids"]
            return inputs

    def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple ):
        # Decode the (char, bpe, wp) logits triple and keep, per sample, the
        # decode with the highest confidence score.
        lowercase__ , lowercase__ , lowercase__ : Tuple = sequences
        lowercase__ : Optional[int] = char_preds.size(0 )

        lowercase__ , lowercase__ : List[Any] = self._decode_helper(SCREAMING_SNAKE_CASE , "char" )
        lowercase__ , lowercase__ : Optional[int] = self._decode_helper(SCREAMING_SNAKE_CASE , "bpe" )
        lowercase__ , lowercase__ : Any = self._decode_helper(SCREAMING_SNAKE_CASE , "wp" )

        lowercase__ : Tuple = []
        lowercase__ : Dict = []
        for i in range(SCREAMING_SNAKE_CASE ):
            lowercase__ : str = [char_scores[i], bpe_scores[i], wp_scores[i]]
            lowercase__ : Union[str, Any] = [char_strs[i], bpe_strs[i], wp_strs[i]]
            lowercase__ : Any = scores.index(max(SCREAMING_SNAKE_CASE ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )

        # Output carries the winning decode plus all three raw decodes.
        lowercase__ : str = {}
        lowercase__ : int = final_strs
        lowercase__ : List[str] = final_scores
        lowercase__ : int = char_strs
        lowercase__ : Any = bpe_strs
        lowercase__ : Union[str, Any] = wp_strs
        return out

    def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] ):
        # Shared greedy-decode helper; `format` selects the decoder function,
        # the EOS token id, and the EOS string used to truncate predictions.
        if format == DecodeType.CHARACTER:
            lowercase__ : Tuple = self.char_decode
            lowercase__ : Optional[Any] = 1
            lowercase__ : List[Any] = "[s]"
        elif format == DecodeType.BPE:
            lowercase__ : int = self.bpe_decode
            lowercase__ : Dict = 2
            lowercase__ : str = "#"
        elif format == DecodeType.WORDPIECE:
            lowercase__ : str = self.wp_decode
            lowercase__ : List[str] = 102
            lowercase__ : Dict = "[SEP]"
        else:
            raise ValueError(f"""Format {format} is not supported.""" )

        lowercase__ , lowercase__ : Dict = [], []
        lowercase__ : Dict = pred_logits.size(0 )
        lowercase__ : str = pred_logits.size(1 )
        # Greedy pick per position; drop the leading (BOS) position.
        lowercase__ , lowercase__ : int = pred_logits.topk(1 , dim=-1 , largest=SCREAMING_SNAKE_CASE , sorted=SCREAMING_SNAKE_CASE )
        lowercase__ : Dict = preds_index.view(-1 , SCREAMING_SNAKE_CASE )[:, 1:]
        lowercase__ : Optional[int] = decoder(SCREAMING_SNAKE_CASE )
        lowercase__ , lowercase__ : Dict = torch.nn.functional.softmax(SCREAMING_SNAKE_CASE , dim=2 ).max(dim=2 )
        lowercase__ : List[Any] = preds_max_prob[:, 1:]

        for index in range(SCREAMING_SNAKE_CASE ):
            # Truncate the decoded string at the EOS marker and score it with
            # the cumulative product of per-step max probabilities up to EOS.
            lowercase__ : List[str] = preds_str[index].find(SCREAMING_SNAKE_CASE )
            lowercase__ : Optional[Any] = preds_str[index][:pred_eos]
            lowercase__ : int = preds_index[index].cpu().tolist()
            lowercase__ : Tuple = pred_index.index(SCREAMING_SNAKE_CASE ) if eos_token in pred_index else -1
            lowercase__ : Dict = preds_max_prob[index][: pred_eos_index + 1]
            lowercase__ : int = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(SCREAMING_SNAKE_CASE )
            conf_scores.append(SCREAMING_SNAKE_CASE )

        return dec_strs, conf_scores

    def snake_case ( self : str , SCREAMING_SNAKE_CASE : Union[str, Any] ):
        # Character-level decode; spaces inserted by the tokenizer are removed.
        lowercase__ : Tuple = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(SCREAMING_SNAKE_CASE )]
        return decode_strs

    def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Any ):
        # BPE decode via the GPT-2 tokenizer.
        return self.bpe_tokenizer.batch_decode(SCREAMING_SNAKE_CASE )

    def snake_case ( self : Any , SCREAMING_SNAKE_CASE : Union[str, Any] ):
        # WordPiece decode via the BERT tokenizer, spaces removed.
        lowercase__ : str = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(SCREAMING_SNAKE_CASE )]
        return decode_strs
81
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Import structure handed to _LazyModule: the configuration is always
# importable; modeling objects are only registered when torch is installed.
lowerCAmelCase__ = {
    '''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: leave the modeling entries out of the lazy module.
    pass
else:
    # NOTE(review): this list looks like it should be stored under a
    # "modeling_timesformer" key of the import structure; as written it only
    # rebinds the module-level name — confirm against the upstream __init__.
    lowerCAmelCase__ = [
        '''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TimesformerModel''',
        '''TimesformerForVideoClassification''',
        '''TimesformerPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    # At runtime, replace this module with a lazy proxy so heavy imports
    # (torch, modeling code) only happen on first attribute access.
    import sys

    lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
81
1
import os
import sys
import unittest


# Make `utils/check_dummies.py` importable from a repository checkout.
lowerCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
lowerCAmelCase__ = os.path.join(git_repo_path, '''src''', '''transformers''')

# Expected templates for generated dummy objects (constant / class / function).
lowerCAmelCase__ = '''
{0} = None
'''

lowerCAmelCase__ = '''
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
'''


lowerCAmelCase__ = '''
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
'''


class snake_case__(unittest.TestCase ):
    """Unit tests for the `utils/check_dummies.py` maintenance script.

    NOTE(review): identifiers look machine-renamed — values assigned to
    ``lowercase__`` are read back under names such as ``objects``. The
    leading-space counts inside the probe strings below were reconstructed
    to four spaces (the file's own templates use four-space indents) after
    whitespace was collapsed — confirm against the upstream test file.
    """

    def snake_case ( self : Optional[int] ):
        # find_backend maps an import-guard line to its backend name (None for
        # unguarded lines); multiple backends are joined with "_and_".
        lowercase__ : Any = find_backend("    _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")" )
        self.assertIsNone(SCREAMING_SNAKE_CASE )

        lowercase__ : Union[str, Any] = find_backend("    if not is_tokenizers_available():" )
        self.assertEqual(SCREAMING_SNAKE_CASE , "tokenizers" )

        lowercase__ : Tuple = find_backend("    if not is_tensorflow_text_available():" )
        self.assertEqual(SCREAMING_SNAKE_CASE , "tensorflow_text" )

        lowercase__ : Optional[Any] = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):" )
        self.assertEqual(SCREAMING_SNAKE_CASE , "sentencepiece_and_tokenizers" )

        lowercase__ : Union[str, Any] = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):" )
        self.assertEqual(SCREAMING_SNAKE_CASE , "sentencepiece_and_tensorflow_text" )

        lowercase__ : Optional[Any] = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):" )
        self.assertEqual(SCREAMING_SNAKE_CASE , "sentencepiece_and_tokenizers_and_vision" )

    def snake_case ( self : Dict ):
        # read_init must report per-backend object lists; only spot-check a
        # few stable entries so the test survives new backends being added.
        lowercase__ : Dict = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch" , SCREAMING_SNAKE_CASE )
        self.assertIn("tensorflow_text" , SCREAMING_SNAKE_CASE )
        self.assertIn("sentencepiece_and_tokenizers" , SCREAMING_SNAKE_CASE )

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel" , objects["torch"] )
        self.assertIn("TFBertModel" , objects["tf"] )
        self.assertIn("FlaxBertModel" , objects["flax"] )
        self.assertIn("BertModel" , objects["torch"] )
        self.assertIn("TFBertTokenizer" , objects["tensorflow_text"] )
        self.assertIn("convert_slow_tokenizer" , objects["sentencepiece_and_tokenizers"] )

    def snake_case ( self : List[str] ):
        # create_dummy_object must emit the constant / function / class
        # templates defined at the top of this module.
        lowercase__ : Optional[int] = create_dummy_object("CONSTANT" , "'torch'" )
        self.assertEqual(SCREAMING_SNAKE_CASE , "\nCONSTANT = None\n" )

        lowercase__ : List[str] = create_dummy_object("function" , "'torch'" )
        self.assertEqual(
            SCREAMING_SNAKE_CASE , "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n" )

        lowercase__ : List[str] = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n"
        lowercase__ : List[Any] = create_dummy_object("FakeClass" , "'torch'" )
        self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    def snake_case ( self : Dict ):
        # create_dummy_files must produce a complete dummy module: header,
        # shared import line, then one dummy per object.
        lowercase__ : Tuple = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n"
        lowercase__ : Any = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
        self.assertEqual(dummy_files["torch"] , SCREAMING_SNAKE_CASE )
81
from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class snake_case__: """simple docstring""" def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int=13 , SCREAMING_SNAKE_CASE : Union[str, Any]=30 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Optional[Any]=3 , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Optional[Any]=True , SCREAMING_SNAKE_CASE : List[str]=32 , SCREAMING_SNAKE_CASE : Optional[int]=2 , SCREAMING_SNAKE_CASE : str=4 , SCREAMING_SNAKE_CASE : List[Any]=37 , SCREAMING_SNAKE_CASE : Tuple="gelu" , SCREAMING_SNAKE_CASE : List[str]=0.1 , SCREAMING_SNAKE_CASE : List[Any]=0.1 , SCREAMING_SNAKE_CASE : int=10 , SCREAMING_SNAKE_CASE : List[str]=0.02 , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : str=0.6 , SCREAMING_SNAKE_CASE : Optional[Any]=None , ): lowercase__ : Union[str, Any] = parent lowercase__ : Optional[int] = batch_size lowercase__ : Union[str, Any] = image_size lowercase__ : List[Any] = patch_size lowercase__ : Any = num_channels lowercase__ : Optional[int] = is_training lowercase__ : Dict = use_labels lowercase__ : Any = hidden_size lowercase__ : List[Any] = num_hidden_layers lowercase__ : Union[str, Any] = num_attention_heads lowercase__ : Dict = intermediate_size 
lowercase__ : Optional[int] = hidden_act lowercase__ : Union[str, Any] = hidden_dropout_prob lowercase__ : Union[str, Any] = attention_probs_dropout_prob lowercase__ : List[Any] = type_sequence_label_size lowercase__ : Any = initializer_range lowercase__ : Optional[int] = mask_ratio lowercase__ : Union[str, Any] = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowercase__ : List[Any] = (image_size // patch_size) ** 2 lowercase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def snake_case ( self : int ): lowercase__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ : str = None if self.use_labels: lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : Optional[Any] = self.get_config() return config, pixel_values, labels def snake_case ( self : Tuple ): return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def snake_case ( self : str , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ): lowercase__ : Tuple = TFViTMAEModel(config=SCREAMING_SNAKE_CASE ) lowercase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] ): lowercase__ : Union[str, Any] = TFViTMAEForPreTraining(SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE ) # expected sequence length = num_patches lowercase__ : List[str] = (self.image_size // self.patch_size) ** 2 lowercase__ : List[Any] = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images lowercase__ : Dict = 1 lowercase__ : List[Any] = TFViTMAEForPreTraining(SCREAMING_SNAKE_CASE ) lowercase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def snake_case ( self : Optional[int] ): lowercase__ : int = self.prepare_config_and_inputs() ((lowercase__) , (lowercase__) , (lowercase__)) : Dict = config_and_inputs lowercase__ : str = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ): """simple docstring""" lowercase_ = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () lowercase_ = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {} lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False def snake_case ( self : List[str] ): lowercase__ : List[Any] = TFViTMAEModelTester(self ) lowercase__ : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 ) def 
snake_case ( self : Tuple ): self.config_tester.run_common_tests() @unittest.skip(reason="ViTMAE does not use inputs_embeds" ) def snake_case ( self : Union[str, Any] ): pass def snake_case ( self : Optional[int] ): lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : List[Any] = model_class(SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) lowercase__ : List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , tf.keras.layers.Layer ) ) def snake_case ( self : Optional[Any] ): lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE ) lowercase__ : Dict = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : Union[str, Any] = [*signature.parameters.keys()] lowercase__ : List[str] = ["pixel_values"] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE ) def snake_case ( self : Optional[Any] ): lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) def snake_case ( self : Optional[int] ): lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE ) def snake_case ( self : Optional[Any] ): # make the mask reproducible np.random.seed(2 ) lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : List[Any] = int((config.image_size // config.patch_size) ** 2 ) lowercase__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowercase__ : Optional[Any] = 
model_class(SCREAMING_SNAKE_CASE ) lowercase__ : int = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE ) lowercase__ : Any = copy.deepcopy(self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE ) lowercase__ : Union[str, Any] = outputs_dict[0].numpy() lowercase__ : Optional[int] = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 ) def snake_case ( self : str ): # make the mask reproducible np.random.seed(2 ) lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Optional[Any] = int((config.image_size // config.patch_size) ** 2 ) lowercase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(SCREAMING_SNAKE_CASE : Optional[int] ): lowercase__ : Tuple = {} for k, v in inputs_dict.items(): if tf.is_tensor(SCREAMING_SNAKE_CASE ): lowercase__ : Any = v.numpy() else: lowercase__ : List[Any] = np.array(SCREAMING_SNAKE_CASE ) return inputs_np_dict for model_class in self.all_model_classes: lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE ) lowercase__ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowercase__ : Any = prepare_numpy_arrays(SCREAMING_SNAKE_CASE ) lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE ) lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE ) self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] ): # make masks reproducible np.random.seed(2 ) lowercase__ : Optional[int] = int((tf_model.config.image_size // 
tf_model.config.patch_size) ** 2 ) lowercase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowercase__ : Union[str, Any] = tf.constant(SCREAMING_SNAKE_CASE ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowercase__ : Optional[int] = tf_noise super().check_pt_tf_models(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def snake_case ( self : str ): # make mask reproducible np.random.seed(2 ) lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : int = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(SCREAMING_SNAKE_CASE ) if module_member_name.endswith("MainLayer" ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. and module_member_name[: -len("MainLayer" )] == model_class.__name__[: -len("Model" )] for module_member in (getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ),) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(SCREAMING_SNAKE_CASE , "_keras_serializable" , SCREAMING_SNAKE_CASE ) } lowercase__ : List[str] = int((config.image_size // config.patch_size) ** 2 ) lowercase__ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowercase__ : str = tf.convert_to_tensor(SCREAMING_SNAKE_CASE ) inputs_dict.update({"noise": noise} ) for main_layer_class in tf_main_layer_classes: lowercase__ : Tuple = main_layer_class(SCREAMING_SNAKE_CASE ) lowercase__ : Optional[Any] = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } lowercase__ : Tuple = tf.keras.Model(SCREAMING_SNAKE_CASE , outputs=main_layer(SCREAMING_SNAKE_CASE ) ) lowercase__ : str = model(SCREAMING_SNAKE_CASE ) with 
tempfile.TemporaryDirectory() as tmpdirname: lowercase__ : str = os.path.join(SCREAMING_SNAKE_CASE , "keras_model.h5" ) model.save(SCREAMING_SNAKE_CASE ) lowercase__ : List[Any] = tf.keras.models.load_model( SCREAMING_SNAKE_CASE , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(SCREAMING_SNAKE_CASE , tf.keras.Model ) lowercase__ : Dict = model(SCREAMING_SNAKE_CASE ) self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def snake_case ( self : Optional[int] ): # make mask reproducible np.random.seed(2 ) lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 ) lowercase__ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE ) if model_class.__name__ == "TFViTMAEModel": lowercase__ : str = outputs.last_hidden_state.numpy() lowercase__ : Optional[Any] = 0 else: lowercase__ : Optional[Any] = outputs.logits.numpy() lowercase__ : Optional[int] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(SCREAMING_SNAKE_CASE , saved_model=SCREAMING_SNAKE_CASE ) lowercase__ : List[str] = model_class.from_pretrained(SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE ) if model_class.__name__ == "TFViTMAEModel": lowercase__ : Optional[int] = after_outputs["last_hidden_state"].numpy() lowercase__ : Optional[int] = 0 else: lowercase__ : str = after_outputs["logits"].numpy() lowercase__ : Tuple = 0 lowercase__ : Optional[Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(SCREAMING_SNAKE_CASE , 1E-5 ) def 
snake_case ( self : List[Any] ): # make mask reproducible np.random.seed(2 ) lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : List[str] = int((config.image_size // config.patch_size) ** 2 ) lowercase__ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowercase__ : Tuple = model_class(SCREAMING_SNAKE_CASE ) lowercase__ : Dict = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowercase__ : int = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE ) lowercase__ : str = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(SCREAMING_SNAKE_CASE ) lowercase__ : int = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config lowercase__ : Any = model_class.from_config(model.config ) lowercase__ : Tuple = new_model(SCREAMING_SNAKE_CASE ) # Build model new_model.set_weights(model.get_weights() ) lowercase__ : Union[str, Any] = new_model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE ) self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @unittest.skip( reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." ) def snake_case ( self : List[Any] ): pass @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. 
See test_save_load" ) def snake_case ( self : str ): pass @slow def snake_case ( self : List[Any] ): lowercase__ : List[Any] = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224" ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( ): """simple docstring""" lowercase__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class snake_case__(unittest.TestCase ): """simple docstring""" @cached_property def snake_case ( self : Any ): return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None @slow def snake_case ( self : Union[str, Any] ): # make random mask reproducible across the PT and TF model np.random.seed(2 ) lowercase__ : Optional[Any] = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ) lowercase__ : Optional[Any] = self.default_image_processor lowercase__ : Union[str, Any] = prepare_img() lowercase__ : Tuple = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="tf" ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowercase__ : Union[str, Any] = ViTMAEConfig() lowercase__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) lowercase__ : List[str] = np.random.uniform(size=(1, num_patches) ) # forward pass lowercase__ : Optional[Any] = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE ) # verify the logits lowercase__ : List[str] = tf.convert_to_tensor([1, 196, 768] ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE ) lowercase__ : List[str] = tf.convert_to_tensor( [[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
81
1
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel

if is_vision_available():
    from transformers import MaskaFormerImageProcessor

if is_vision_available():
    from PIL import Image


class MaskaFormerModelTester:
    """Builds tiny Mask2Former configs and random inputs for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        # NOTE(review): the mask feature size tracks the hidden dim in this tiny config.
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        # Random binary masks and labels; > 0.5 thresholding gives ~50/50 splits.
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels

        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels

        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128

        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_maskaformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskaFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskaformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskaFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

        comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))


@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-behaviour test suite for the Mask2Former models."""

    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskaformer_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs, output_hidden_states=False)

    def test_maskaformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = MaskaFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = MaskaFormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        # only MaskaFormerForUniversalSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskaFormerForUniversalSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)


TOLERANCE = 1e-4


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
81
from dataclasses import asdict, dataclass
from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class EsmConfig(PretrainedConfig):
    """
    Configuration for ESM models, optionally carrying an ESMFold folding head.

    When `is_folding_model` is True, an `EsmFoldConfig` (and a vocab list) is
    attached; otherwise the folding-specific attributes are left as None.
    """

    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        # pad/mask token ids are handled by the base class so downstream code
        # can rely on the standard PretrainedConfig attributes.
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model

        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values." )
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None

        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested esmfold config."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output


@dataclass
class EsmFoldConfig:
    """Options for the ESMFold folding head wrapped around an ESM encoder."""

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        # Allow the trunk to be passed as a dict (e.g. loaded from JSON).
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested trunk config."""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output


@dataclass
class TrunkConfig:
    """Configuration of the ESMFold trunk (Evoformer-style stack)."""

    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
        # BUGFIX: these used to compute `x % x` (always 0), so the divisibility
        # checks could never fire. The state dims must divide evenly into heads.
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
        if self.dropout >= 0.4:
            raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested structure module."""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output


@dataclass
class StructureModuleConfig:
    """Configuration of the ESMFold structure module (IPA head)."""

    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
    """Return the default ESM-2 token vocabulary as a tuple of strings."""
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )
81
1
class EditDistance:
    """
    Levenshtein edit distance between two strings (insert / delete / replace,
    unit cost), computed two ways: top-down memoized recursion and bottom-up
    tabulation. The obfuscated original collapsed both words into a single
    attribute, so it effectively compared a string with itself; this version
    keeps the two words distinct.
    """

    def __init__(self) -> None:
        # The two words under comparison and the shared memo/DP table.
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        """Memoized recursion over prefixes word1[:m+1] and word2[:n+1]."""
        # Base cases: one word exhausted -> insert the remainder of the other.
        if m == -1:
            return n + 1
        if n == -1:
            return m + 1
        if self.dp[m][n] > -1:  # already memoized
            return self.dp[m][n]

        if self.word1[m] == self.word2[n]:
            # Matching last characters cost nothing.
            self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
        else:
            insert = self.__min_dist_top_down_dp(m, n - 1)
            delete = self.__min_dist_top_down_dp(m - 1, n)
            replace = self.__min_dist_top_down_dp(m - 1, n - 1)
            self.dp[m][n] = 1 + min(insert, delete, replace)
        return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        """Return the edit distance between `word1` and `word2` (memoized recursion)."""
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        """Return the edit distance between `word1` and `word2` (tabulation)."""
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
81
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}


class DeformableDetrConfig(PretrainedConfig):
    """
    Configuration for Deformable DETR models.

    NOTE: the obfuscated original assigned every constructor argument to a
    throwaway local instead of `self`, so no attribute was ever set; this
    version restores the attribute assignments.
    """

    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        # `return_intermediate` is accepted for backward compatibility but is
        # not stored — the original body never assigned it either.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
            elif isinstance(backbone_config, dict):
                # Re-hydrate a serialized backbone config via its model_type.
                backbone_model_type = backbone_config.get("model_type" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True." )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs )

    @property
    def num_attention_heads(self) -> int:
        # Mirrors attribute_map: attention heads live on the encoder setting.
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        # Mirrors attribute_map: hidden size is the transformer d_model.
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
81
1
import logging
import os

import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput


logger = logging.getLogger(__name__)

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    """
    Trainer subclass for extractive QA with TensorRT/pytorch-quantization
    support: adds calibration, QA-style post-processed evaluation/prediction,
    and ONNX export of the quantized model.
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        """Build a (non-shuffled) DataLoader over the calibration dataset."""
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset." )
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration" )
        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=False,
        )

    def calibrate(self, calib_dataset=None):
        """Run forward passes to collect quantizer calibration statistics."""
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset )

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True )
        model.eval()
        quant_trainer.enable_calibration(model )

        logger.info("***** Running calibration *****" )
        logger.info(f"""  Num examples = {self.calib_num}""" )
        logger.info(f"""  Batch size = {calib_dataloader.batch_size}""" )
        for step, inputs in enumerate(calib_dataloader ):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True )
            # Stop once enough samples have passed through the quantizers.
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args )
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        """Evaluate with QA post-processing; returns a metrics dict."""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                # No point gathering the predictions if there are no metrics.
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions )
            metrics = self.compute_metrics(eval_preds )

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(f"""{metric_key_prefix}_""" ):
                    metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key )

            self.log(metrics )
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics )
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        """Predict with QA post-processing; returns a PredictionOutput."""
        predict_dataloader = self.get_test_dataloader(predict_dataset )

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict" )
        metrics = self.compute_metrics(predictions )

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(f"""{metric_key_prefix}_""" ):
                metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key )

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics )

    def save_onnx(self, output_dir="./"):
        """Export the (quantized) model to `<output_dir>/model.onnx`."""
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )

        batch = next(iter(eval_dataloader ) )

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu" )

        # convert to tuple
        input_tuple = tuple(v.to(device ) for k, v in batch.items() )

        logger.info("Converting model to be onnx compatible" )
        from pytorch_quantization.nn import TensorQuantizer

        # Fake-quant ops are required so ONNX can represent the quantizers.
        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device )
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module" ) else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args )

        output_model_file = os.path.join(output_dir, "model.onnx" )
        logger.info(f"""exporting model to {output_model_file}""" )

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished" )
700
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    """
    Image processor for super-resolution models: rescales pixel values and
    symmetric-pads height/width up to a multiple of `pad_size`.

    NOTE: the obfuscated original assigned constructor arguments and loop
    results to throwaway locals; this version restores the attribute
    assignments and the pipeline wiring.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs )
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by `scale` (e.g. 1/255 for [0, 1] range)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs )

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        """Symmetric-pad the bottom/right so height and width are multiples of `size`.

        NOTE(review): `(old // size + 1) * size - old` adds a full extra `size`
        when the dimension is already a multiple — presumably intentional for
        this model family, but confirm against the upstream processor.
        """
        old_height, old_width = get_image_size(image )
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format )

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the full pipeline (rescale then pad) and return a BatchFeature."""
        # Per-call overrides fall back to the values set at construction time.
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images )

        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor ) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size ) for image in images]

        images = [to_channel_dimension_format(image, data_format ) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors )
81
0
import argparse
import json

import numpy
import torch

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    """Convert an official XLM checkpoint into HF format (weights + config + vocab).

    Writes WEIGHTS_NAME, CONFIG_NAME and the vocab file into
    `pytorch_dump_folder_path`.
    """
    # Load the raw checkpoint on CPU so no GPU is required for conversion.
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu" )

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    # Drop tensor-valued params: only plain JSON-serializable values belong in the config.
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray) )}

    vocab = chkpt["dico_word2id"]
    # Convert BPE markers: plain tokens get a `</w>` suffix, `@@` continuations lose the marker.
    vocab = {s + "</w>" if s.find("@@" ) == -1 and i > 13 else s.replace("@@", "" ): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
    torch.save(two_levels_state_dict, pytorch_weights_dump_path )

    print(f"""Save configuration file to {pytorch_config_dump_path}""" )
    with open(pytorch_config_dump_path, "w", encoding="utf-8" ) as f:
        f.write(json.dumps(config, indent=2 ) + "\n" )

    # BUGFIX: the original printed the *config* path here instead of the vocab path.
    print(f"""Save vocab file to {pytorch_vocab_dump_path}""" )
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8" ) as f:
        f.write(json.dumps(vocab, indent=2 ) + "\n" )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
701
import argparse import json from tqdm import tqdm def __lowerCamelCase ( ): """simple docstring""" lowercase__ : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--src_path" , type=lowerCamelCase__ , default="biencoder-nq-dev.json" , help="Path to raw DPR training data" , ) parser.add_argument( "--evaluation_set" , type=lowerCamelCase__ , help="where to store parsed evaluation_set file" , ) parser.add_argument( "--gold_data_path" , type=lowerCamelCase__ , help="where to store parsed gold_data_path file" , ) lowercase__ : Dict = parser.parse_args() with open(args.src_path , "r" ) as src_file, open(args.evaluation_set , "w" ) as eval_file, open( args.gold_data_path , "w" ) as gold_file: lowercase__ : List[str] = json.load(lowerCamelCase__ ) for dpr_record in tqdm(lowerCamelCase__ ): lowercase__ : Any = dpr_record["question"] lowercase__ : str = [context["title"] for context in dpr_record["positive_ctxs"]] eval_file.write(question + "\n" ) gold_file.write("\t".join(lowerCamelCase__ ) + "\n" ) if __name__ == "__main__": main()
81
0
import itertools import math def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_SCREAMING_SNAKE_CASE ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def __lowerCamelCase ( ): """simple docstring""" lowercase__ : str = 2 while True: if is_prime(_SCREAMING_SNAKE_CASE ): yield num num += 1 def __lowerCamelCase ( lowerCamelCase__ = 10_001 ): """simple docstring""" return next(itertools.islice(prime_generator() , nth - 1 , _SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": print(f'''{solution() = }''')
702
import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer lowerCAmelCase__ = logging.getLogger(__name__) def __lowerCamelCase ( ): """simple docstring""" lowercase__ : str = argparse.ArgumentParser( description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." ) parser.add_argument( "--dataset_name" , type=lowerCamelCase__ , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , ) parser.add_argument( "--dataset_config" , type=lowerCamelCase__ , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." ) parser.add_argument( "--tokenizer_name_or_path" , type=lowerCamelCase__ , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , ) parser.add_argument( "--shard_size" , type=lowerCamelCase__ , default=1_000 , help="Number of entries to go in a single shard." , ) parser.add_argument("--split" , type=lowerCamelCase__ , default="train" , choices=["train", "test", "validation"] ) parser.add_argument( "--limit" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="Limit the number of shards (used for debugging)." , ) parser.add_argument( "--max_length" , type=lowerCamelCase__ , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum" " sequence length that is a multiple of 8." , ) parser.add_argument( "--output_dir" , default="tf-tpu" , type=lowerCamelCase__ , help="Output directory where the TFRecord shards will be saved. If the" " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord" " shards will be directly saved to a Google Cloud Storage bucket." 
, ) lowercase__ : Optional[int] = parser.parse_args() return args def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" def fn(lowerCamelCase__ ): return tokenizer(examples["text"] ) return fn def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" lowercase__ : str = [] for i in range(len(tokenized_data["input_ids"] ) ): lowercase__ : str = { "input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ), "attention_mask": tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ), } lowercase__ : Any = tf.train.Features(feature=lowerCamelCase__ ) lowercase__ : Any = tf.train.Example(features=lowerCamelCase__ ) lowercase__ : str = example.SerializeToString() records.append(lowerCamelCase__ ) return records def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" lowercase__ : Tuple = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: lowercase__ : List[str] = min(len(lowerCamelCase__ ) , args.limit ) lowercase__ : Union[str, Any] = dataset.select(range(lowerCamelCase__ ) ) print(F"""Limiting the dataset to {args.limit} entries.""" ) lowercase__ : Any = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) lowercase__ : Any = os.path.join(args.output_dir , args.split ) if not os.path.exists(lowerCamelCase__ ): os.makedirs(lowerCamelCase__ ) else: lowercase__ : str = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. 
lowercase__ : str = tokenize_function(lowerCamelCase__ ) lowercase__ : Optional[int] = dataset.map(lowerCamelCase__ , batched=lowerCamelCase__ , num_proc=4 , remove_columns=["text"] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(lowerCamelCase__ ): # Concatenate all texts. lowercase__ : Optional[Any] = {k: sum(examples[k] , [] ) for k in examples.keys()} lowercase__ : int = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 lowercase__ : List[str] = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. 
lowercase__ : Optional[int] = { k: [t[i : i + args.max_length] for i in range(0 , lowerCamelCase__ , args.max_length )] for k, t in concatenated_examples.items() } return result lowercase__ : Union[str, Any] = dataset_tokenized.map(lowerCamelCase__ , batched=lowerCamelCase__ , batch_size=1_000 , num_proc=4 ) lowercase__ : str = 0 lowercase__ : str = 0 for shard in range(0 , len(lowerCamelCase__ ) , args.shard_size ): lowercase__ : List[str] = grouped_dataset[shard : shard + args.shard_size] lowercase__ : str = len(dataset_snapshot["input_ids"] ) lowercase__ : int = os.path.join(lowerCamelCase__ , F"""dataset-{shard_count}-{records_containing}.tfrecord""" ) lowercase__ : Optional[int] = get_serialized_examples(lowerCamelCase__ ) with tf.io.TFRecordWriter(lowerCamelCase__ ) as out_file: for i in range(len(lowerCamelCase__ ) ): lowercase__ : Optional[int] = serialized_examples[i] out_file.write(lowerCamelCase__ ) print("Wrote file {} containing {} records".format(lowerCamelCase__ , lowerCamelCase__ ) ) shard_count += 1 total_records += records_containing with open(F"""split-{args.split}-records-count.txt""" , "w" ) as f: print(F"""Total {args.split} records: {total_records}""" , file=lowerCamelCase__ ) if __name__ == "__main__": lowerCAmelCase__ = parse_args() main(args)
81
0
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class snake_case__: """simple docstring""" def __init__( self : Dict , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[Any]=3 , SCREAMING_SNAKE_CASE : Dict=32 , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : Dict=10 , SCREAMING_SNAKE_CASE : Dict=[8, 16, 32, 64] , SCREAMING_SNAKE_CASE : List[Any]=[1, 1, 2, 1] , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Optional[Any]="relu" , SCREAMING_SNAKE_CASE : Union[str, Any]=3 , SCREAMING_SNAKE_CASE : Tuple=None , SCREAMING_SNAKE_CASE : Optional[Any]=["stage2", "stage3", "stage4"] , SCREAMING_SNAKE_CASE : Optional[Any]=[2, 3, 4] , SCREAMING_SNAKE_CASE : List[str]=1 , ): lowercase__ : Dict = parent lowercase__ : Union[str, Any] = batch_size lowercase__ : str = image_size lowercase__ : List[Any] = num_channels lowercase__ : Any = embeddings_size lowercase__ : Tuple = hidden_sizes lowercase__ : Optional[int] = depths lowercase__ : List[Any] = is_training lowercase__ : List[str] = use_labels lowercase__ : str = hidden_act lowercase__ : str = num_labels lowercase__ : Optional[int] = scope lowercase__ : Optional[int] = len(A_ ) lowercase__ : Any = out_features lowercase__ : Optional[int] = 
out_indices lowercase__ : Any = num_groups def snake_case ( self : Dict ): lowercase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ : str = None if self.use_labels: lowercase__ : int = ids_tensor([self.batch_size] , self.num_labels ) lowercase__ : Tuple = self.get_config() return config, pixel_values, labels def snake_case ( self : Union[str, Any] ): return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def snake_case ( self : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[str] ): lowercase__ : List[str] = BitModel(config=A_ ) model.to(A_ ) model.eval() lowercase__ : Optional[Any] = model(A_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] ): lowercase__ : Any = self.num_labels lowercase__ : str = BitForImageClassification(A_ ) model.to(A_ ) model.eval() lowercase__ : str = model(A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple ): lowercase__ : Dict = BitBackbone(config=A_ ) model.to(A_ ) model.eval() lowercase__ : List[str] = model(A_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels 
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None lowercase__ : Tuple = None lowercase__ : Union[str, Any] = BitBackbone(config=A_ ) model.to(A_ ) model.eval() lowercase__ : List[str] = model(A_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def snake_case ( self : List[str] ): lowercase__ : Dict = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Any = config_and_inputs lowercase__ : Optional[Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class snake_case__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ): """simple docstring""" lowercase_ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () lowercase_ = ( {"feature-extraction": BitModel, "image-classification": BitForImageClassification} if is_torch_available() else {} ) lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False def snake_case ( self : List[Any] ): lowercase__ : Dict = BitModelTester(self ) lowercase__ : Optional[Any] = ConfigTester(self , config_class=A_ , has_text_modality=A_ ) def snake_case ( self : List[Any] ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def snake_case ( 
self : str ): return @unittest.skip(reason="Bit does not output attentions" ) def snake_case ( self : int ): pass @unittest.skip(reason="Bit does not use inputs_embeds" ) def snake_case ( self : str ): pass @unittest.skip(reason="Bit does not support input and output embeddings" ) def snake_case ( self : Union[str, Any] ): pass def snake_case ( self : Dict ): lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : List[Any] = model_class(A_ ) lowercase__ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : Optional[Any] = [*signature.parameters.keys()] lowercase__ : Tuple = ["pixel_values"] self.assertListEqual(arg_names[:1] , A_ ) def snake_case ( self : Tuple ): lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def snake_case ( self : str ): lowercase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*A_ ) def snake_case ( self : Union[str, Any] ): lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Tuple = model_class(config=A_ ) for name, module in model.named_modules(): if isinstance(A_ , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) def snake_case ( self : Any ): def check_hidden_states_output(SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict ): lowercase__ : Any = model_class(A_ ) model.to(A_ ) model.eval() with torch.no_grad(): lowercase__ : 
List[str] = model(**self._prepare_for_class(A_ , A_ ) ) lowercase__ : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase__ : Union[str, Any] = self.model_tester.num_stages self.assertEqual(len(A_ ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowercase__ , lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Union[str, Any] = ["preactivation", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: lowercase__ : Optional[int] = layer_type lowercase__ : Optional[Any] = True check_hidden_states_output(A_ , A_ , A_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : List[Any] = True check_hidden_states_output(A_ , A_ , A_ ) @unittest.skip(reason="Bit does not use feedforward chunking" ) def snake_case ( self : Optional[Any] ): pass def snake_case ( self : Any ): lowercase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A_ ) @slow def snake_case ( self : Tuple ): for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Union[str, Any] = BitModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def __lowerCamelCase ( ): """simple docstring""" lowercase__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class snake_case__(unittest.TestCase ): """simple docstring""" @cached_property def snake_case ( self : Optional[int] ): return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def snake_case ( self : int ): lowercase__ : List[str] = 
BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(A_ ) lowercase__ : int = self.default_image_processor lowercase__ : str = prepare_img() lowercase__ : Optional[int] = image_processor(images=A_ , return_tensors="pt" ).to(A_ ) # forward pass with torch.no_grad(): lowercase__ : List[Any] = model(**A_ ) # verify the logits lowercase__ : Optional[int] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , A_ ) lowercase__ : List[Any] = torch.tensor([[-0.6_526, -0.5_263, -1.4_398]] ).to(A_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) ) @require_torch class snake_case__(_SCREAMING_SNAKE_CASE , unittest.TestCase ): """simple docstring""" lowercase_ = (BitBackbone,) if is_torch_available() else () lowercase_ = BitConfig lowercase_ = False def snake_case ( self : int ): lowercase__ : int = BitModelTester(self )
703
import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case__: """simple docstring""" def __init__( self : Any , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple=13 , SCREAMING_SNAKE_CASE : List[str]=32 , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : Any=4 , SCREAMING_SNAKE_CASE : Optional[Any]=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE : int=[2, 2, 3, 2] , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : str=37 , SCREAMING_SNAKE_CASE : Tuple="gelu" , SCREAMING_SNAKE_CASE : Optional[int]=10 , SCREAMING_SNAKE_CASE : Optional[int]=0.02 , SCREAMING_SNAKE_CASE : Union[str, Any]=["stage2", "stage3", "stage4"] , SCREAMING_SNAKE_CASE : Optional[int]=[2, 3, 4] , SCREAMING_SNAKE_CASE : str=None , ): lowercase__ : Union[str, Any] = parent lowercase__ : Optional[int] = batch_size lowercase__ : Optional[Any] = image_size lowercase__ : Tuple = num_channels lowercase__ : Tuple = num_stages lowercase__ : List[Any] = hidden_sizes lowercase__ : Any = depths lowercase__ : List[str] = is_training lowercase__ : int = use_labels lowercase__ : Union[str, Any] = 
intermediate_size lowercase__ : List[Any] = hidden_act lowercase__ : Tuple = num_labels lowercase__ : Optional[Any] = initializer_range lowercase__ : Optional[Any] = out_features lowercase__ : Union[str, Any] = out_indices lowercase__ : Tuple = scope def snake_case ( self : Dict ): lowercase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ : Dict = None if self.use_labels: lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_labels ) lowercase__ : Tuple = self.get_config() return config, pixel_values, labels def snake_case ( self : Tuple ): return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] ): lowercase__ : Dict = ConvNextVaModel(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ): lowercase__ : Any = ConvNextVaForImageClassification(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() lowercase__ : str = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case ( self : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : 
Dict ): lowercase__ : Any = ConvNextVaBackbone(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None lowercase__ : str = None lowercase__ : List[Any] = ConvNextVaBackbone(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def snake_case ( self : Dict ): lowercase__ : str = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Optional[int] = config_and_inputs lowercase__ : List[Any] = {"pixel_values": pixel_values} return config, inputs_dict def snake_case ( self : Optional[Any] ): lowercase__ : Optional[Any] = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Dict = config_and_inputs lowercase__ : Optional[Any] = {"pixel_values": pixel_values, "labels": labels} return config, inputs_dict @require_torch class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ): """simple docstring""" lowercase_ = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) lowercase_ = ( {"""feature-extraction""": ConvNextVaModel, """image-classification""": 
ConvNextVaForImageClassification} if is_torch_available() else {} ) lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False def snake_case ( self : List[Any] ): lowercase__ : List[str] = ConvNextVaModelTester(self ) lowercase__ : Optional[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 ) def snake_case ( self : Optional[int] ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def snake_case ( self : List[str] ): return @unittest.skip(reason="ConvNextV2 does not use inputs_embeds" ) def snake_case ( self : Dict ): pass @unittest.skip(reason="ConvNextV2 does not support input and output embeddings" ) def snake_case ( self : Union[str, Any] ): pass @unittest.skip(reason="ConvNextV2 does not use feedforward chunking" ) def snake_case ( self : Union[str, Any] ): pass def snake_case ( self : Optional[int] ): if not self.model_tester.is_training: return for model_class in self.all_model_classes: lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels() lowercase__ : List[str] = True if model_class.__name__ in [ *get_values(SCREAMING_SNAKE_CASE ), *get_values(SCREAMING_SNAKE_CASE ), ]: continue lowercase__ : List[str] = model_class(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.train() lowercase__ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE ) lowercase__ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE ).loss loss.backward() def snake_case ( self : Optional[Any] ): if not 
self.model_tester.is_training: return for model_class in self.all_model_classes: lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_with_labels() lowercase__ : Optional[Any] = False lowercase__ : Dict = True if ( model_class.__name__ in [*get_values(SCREAMING_SNAKE_CASE ), *get_values(SCREAMING_SNAKE_CASE )] or not model_class.supports_gradient_checkpointing ): continue lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.gradient_checkpointing_enable() model.train() lowercase__ : str = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE ) lowercase__ : str = model(**SCREAMING_SNAKE_CASE ).loss loss.backward() def snake_case ( self : int ): lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE ) lowercase__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : str = [*signature.parameters.keys()] lowercase__ : Optional[int] = ["pixel_values"] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE ) def snake_case ( self : Dict ): lowercase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) def snake_case ( self : str ): def check_hidden_states_output(SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : str ): lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): lowercase__ : Tuple = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) lowercase__ : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase__ : Dict = self.model_tester.num_stages 
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Union[str, Any] = True check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : Optional[Any] = True check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def snake_case ( self : Any ): lowercase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE ) @slow def snake_case ( self : List[str] ): for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : List[str] = ConvNextVaModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( ): """simple docstring""" lowercase__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class snake_case__(unittest.TestCase ): """simple docstring""" @cached_property def snake_case ( self : List[Any] ): return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None @slow def snake_case ( self : Optional[int] ): lowercase__ : Union[str, Any] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(SCREAMING_SNAKE_CASE ) lowercase__ : Dict = self.default_image_processor lowercase__ : int = prepare_img() lowercase__ : Optional[Any] = preprocessor(images=SCREAMING_SNAKE_CASE , return_tensors="pt" 
).to(SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE ) # verify the logits lowercase__ : Optional[int] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE ) lowercase__ : Optional[Any] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
81
0
'''simple docstring''' def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowercase__ : List[str] = word.split() def justify(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> str: lowercase__ : Dict = max_width - width lowercase__ : Tuple = len(_lowercase ) if len(_lowercase ) == 1: # if there is only word in line # just insert overall_spaces_count for the remainder of line return line[0] + " " * overall_spaces_count else: lowercase__ : Tuple = words_count - 1 # num_spaces_between_words_list[i] : tells you to insert # num_spaces_between_words_list[i] spaces # after word on line[i] lowercase__ : str = spaces_to_insert_between_words * [ overall_spaces_count // spaces_to_insert_between_words ] lowercase__ : Optional[int] = ( overall_spaces_count % spaces_to_insert_between_words ) # distribute spaces via round robin to the left words for i in range(_lowercase ): num_spaces_between_words_list[i] += 1 lowercase__ : Union[str, Any] = [] for i in range(_lowercase ): # add the word aligned_words_list.append(line[i] ) # add the spaces to insert aligned_words_list.append(num_spaces_between_words_list[i] * " " ) # just add the last word to the sentence aligned_words_list.append(line[-1] ) # join the aligned words list to form a justified line return "".join(_lowercase ) lowercase__ : str = [] lowercase__ : list[str] = [] lowercase__ : Union[str, Any] = 0 for word in words: if width + len(_lowercase ) + len(_lowercase ) <= max_width: # keep adding words until we can fill out max_width # width = sum of length of all words (without overall_spaces_count) # len(word) = length of current word # len(line) = number of overall_spaces_count to insert between words line.append(_lowercase ) width += len(_lowercase ) else: # justify the line and add it to result answer.append(justify(_lowercase , _lowercase , _lowercase ) ) # reset new line and new width lowercase__ : Optional[Any] = [word], len(_lowercase ) lowercase__ : Optional[int] = 
max_width - width - len(_lowercase ) answer.append(" ".join(_lowercase ) + (remaining_spaces + 1) * " " ) return answer if __name__ == "__main__": from doctest import testmod testmod()
704
# NOTE(review): end-to-end training smoke test for a tiny BERT2BERT EncoderDecoder
# model with Seq2SeqTrainer on a CNN/DailyMail slice.  Identifiers were mechanically
# mangled (every assignment target became `lowercase__`, arguments became
# `SCREAMING_SNAKE_CASE`), so assignments and later reads (`bertabert`, `tokenizer`,
# `train_dataset`, ...) no longer match and the file cannot run as-is -- restore the
# original names before executing.  Code kept byte-identical below; only comments added.
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


class snake_case__(_UpperCamelCase ):
    """simple docstring"""

    @slow
    @require_torch
    def snake_case ( self : Any ):
        # tiny encoder/decoder pair keeps the test fast; tokenizer comes from base BERT
        lowercase__ : List[str] = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
        lowercase__ : int = BertTokenizer.from_pretrained("bert-base-uncased" )
        lowercase__ : str = bertabert.config.encoder.vocab_size
        lowercase__ : List[str] = tokenizer.sep_token_id
        lowercase__ : Optional[Any] = tokenizer.cls_token_id
        lowercase__ : int = 128
        # 1% splits, then further subsampled to 32 train / 16 validation examples
        lowercase__ : str = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
        lowercase__ : Tuple = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
        lowercase__ : Tuple = train_dataset.select(range(32 ) )
        lowercase__ : Optional[int] = val_dataset.select(range(16 ) )
        lowercase__ : int = 4

        def _map_to_encoder_decoder_inputs(SCREAMING_SNAKE_CASE : Optional[Any] ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            lowercase__ : List[Any] = tokenizer(batch["article"] , padding="max_length" , truncation=SCREAMING_SNAKE_CASE , max_length=512 )
            lowercase__ : Dict = tokenizer(batch["highlights"] , padding="max_length" , truncation=SCREAMING_SNAKE_CASE , max_length=128 )
            lowercase__ : Tuple = inputs.input_ids
            lowercase__ : Optional[int] = inputs.attention_mask
            lowercase__ : int = outputs.input_ids
            lowercase__ : Dict = outputs.input_ids.copy()
            # pad positions in the labels are masked out of the loss with -100
            lowercase__ : int = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            lowercase__ : List[Any] = outputs.attention_mask

            assert all(len(SCREAMING_SNAKE_CASE ) == 512 for x in inputs.input_ids )
            assert all(len(SCREAMING_SNAKE_CASE ) == 128 for x in outputs.input_ids )

            return batch

        def _compute_metrics(SCREAMING_SNAKE_CASE : List[str] ):
            lowercase__ : Union[str, Any] = pred.label_ids
            lowercase__ : Dict = pred.predictions

            # all unnecessary tokens are removed
            lowercase__ : List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
            lowercase__ : str = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
            # exact-match accuracy over the decoded prediction/label strings
            lowercase__ : Tuple = sum([int(pred_str[i] == label_str[i] ) for i in range(len(SCREAMING_SNAKE_CASE ) )] ) / len(SCREAMING_SNAKE_CASE )

            return {"accuracy": accuracy}

        # map train dataset
        lowercase__ : List[str] = train_dataset.map(
            _map_to_encoder_decoder_inputs ,
            batched=SCREAMING_SNAKE_CASE ,
            batch_size=SCREAMING_SNAKE_CASE ,
            remove_columns=["article", "highlights"] ,
        )
        train_dataset.set_format(
            type="torch" ,
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] ,
        )

        # same for validation dataset
        lowercase__ : Any = val_dataset.map(
            _map_to_encoder_decoder_inputs ,
            batched=SCREAMING_SNAKE_CASE ,
            batch_size=SCREAMING_SNAKE_CASE ,
            remove_columns=["article", "highlights"] ,
        )
        val_dataset.set_format(
            type="torch" ,
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] ,
        )

        lowercase__ : List[str] = self.get_auto_remove_tmp_dir()
        lowercase__ : int = SeqaSeqTrainingArguments(
            output_dir=SCREAMING_SNAKE_CASE ,
            per_device_train_batch_size=SCREAMING_SNAKE_CASE ,
            per_device_eval_batch_size=SCREAMING_SNAKE_CASE ,
            predict_with_generate=SCREAMING_SNAKE_CASE ,
            evaluation_strategy="steps" ,
            do_train=SCREAMING_SNAKE_CASE ,
            do_eval=SCREAMING_SNAKE_CASE ,
            warmup_steps=0 ,
            eval_steps=2 ,
            logging_steps=2 ,
        )

        # instantiate trainer
        lowercase__ : str = SeqaSeqTrainer(
            model=SCREAMING_SNAKE_CASE ,
            args=SCREAMING_SNAKE_CASE ,
            compute_metrics=_compute_metrics ,
            train_dataset=SCREAMING_SNAKE_CASE ,
            eval_dataset=SCREAMING_SNAKE_CASE ,
            tokenizer=SCREAMING_SNAKE_CASE ,
        )

        # start training
        trainer.train()
81
0
# NOTE(review): Flax Blenderbot model tests (input preparation helper, a model tester,
# a standalone-head test case, and the common-test subclass).  Identifiers were
# mechanically mangled (`lowercase__`, duplicated `lowerCamelCase__` parameters --
# the latter is a SyntaxError), so the file cannot run as-is; restore the original
# names before executing.  Code kept byte-identical below; only comments added.
import unittest

import numpy as np
import timeout_decorator  # noqa

from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor


if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    lowerCAmelCase__ = "platform"
    import jax
    import jax.numpy as jnp

    from transformers import BlenderbotTokenizer
    from transformers.models.blenderbot.modeling_flax_blenderbot import (
        FlaxBlenderbotForConditionalGeneration,
        FlaxBlenderbotModel,
        shift_tokens_right,
    )


# Builds default attention/head masks from pad tokens when not supplied.
# NOTE(review): the returned dict maps "decoder_attention_mask" to the *encoder*
# attention_mask (and the computed head masks are dropped) -- looks wrong; verify
# against the upstream test helper.
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , ):
    """simple docstring"""
    if attention_mask is None:
        lowercase__ : Optional[Any] = np.where(input_ids != config.pad_token_id , 1 , 0 )
    if decoder_attention_mask is None:
        lowercase__ : Union[str, Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
    if head_mask is None:
        lowercase__ : Dict = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        lowercase__ : str = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        lowercase__ : List[str] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }


# Model tester: holds hyper-parameters and builds (config, inputs) pairs plus
# the use_cache decoding-equivalence checks used by the common test mixin.
class snake_case__:
    """simple docstring"""

    def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any]=13 , SCREAMING_SNAKE_CASE : Optional[int]=7 , SCREAMING_SNAKE_CASE : str=True , SCREAMING_SNAKE_CASE : List[Any]=False , SCREAMING_SNAKE_CASE : Dict=99 , SCREAMING_SNAKE_CASE : List[Any]=16 , SCREAMING_SNAKE_CASE : str=2 , SCREAMING_SNAKE_CASE : Union[str, Any]=4 , SCREAMING_SNAKE_CASE : str=4 , SCREAMING_SNAKE_CASE : int="gelu" , SCREAMING_SNAKE_CASE : List[Any]=0.1 , SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=32 , SCREAMING_SNAKE_CASE : Optional[Any]=2 , SCREAMING_SNAKE_CASE : Any=1 , SCREAMING_SNAKE_CASE : List[Any]=0 , SCREAMING_SNAKE_CASE : Any=0.02 , ):
        lowercase__ : List[str] = parent
        lowercase__ : int = batch_size
        lowercase__ : Tuple = seq_length
        lowercase__ : Tuple = is_training
        lowercase__ : Dict = use_labels
        lowercase__ : Dict = vocab_size
        lowercase__ : Any = hidden_size
        lowercase__ : Tuple = num_hidden_layers
        lowercase__ : Tuple = num_attention_heads
        lowercase__ : Optional[Any] = intermediate_size
        lowercase__ : str = hidden_act
        lowercase__ : Any = hidden_dropout_prob
        lowercase__ : Dict = attention_probs_dropout_prob
        lowercase__ : Optional[int] = max_position_embeddings
        lowercase__ : int = eos_token_id
        lowercase__ : Tuple = pad_token_id
        lowercase__ : Union[str, Any] = bos_token_id
        lowercase__ : List[str] = initializer_range

    def snake_case ( self : List[str] ):
        # random ids in [3, vocab_size), eos (2) appended to every row
        lowercase__ : Dict = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        lowercase__ : Tuple = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )

        lowercase__ : int = shift_tokens_right(__lowerCamelCase , 1 , 2 )

        lowercase__ : Tuple = BlenderbotConfig(
            vocab_size=self.vocab_size ,
            d_model=self.hidden_size ,
            encoder_layers=self.num_hidden_layers ,
            decoder_layers=self.num_hidden_layers ,
            encoder_attention_heads=self.num_attention_heads ,
            decoder_attention_heads=self.num_attention_heads ,
            encoder_ffn_dim=self.intermediate_size ,
            decoder_ffn_dim=self.intermediate_size ,
            dropout=self.hidden_dropout_prob ,
            attention_dropout=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            eos_token_id=self.eos_token_id ,
            bos_token_id=self.bos_token_id ,
            pad_token_id=self.pad_token_id ,
            initializer_range=self.initializer_range ,
            use_cache=__lowerCamelCase ,
        )
        lowercase__ : Tuple = prepare_blenderbot_inputs_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
        return config, inputs_dict

    def snake_case ( self : Union[str, Any] ):
        lowercase__ : int = self.prepare_config_and_inputs()
        return config, inputs_dict

    # checks that incremental decoding with a kv-cache matches full decoding
    def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple ):
        lowercase__ : Any = 20
        lowercase__ : Any = model_class_name(__lowerCamelCase )

        lowercase__ : Tuple = model.encode(inputs_dict["input_ids"] )

        lowercase__ : int = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        lowercase__ : str = model.init_cache(decoder_input_ids.shape[0] , __lowerCamelCase , __lowerCamelCase )
        lowercase__ : List[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )

        lowercase__ : Dict = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,
        )
        lowercase__ : Dict = model.decode(
            decoder_input_ids[:, :-1] ,
            __lowerCamelCase ,
            decoder_attention_mask=__lowerCamelCase ,
            past_key_values=__lowerCamelCase ,
            decoder_position_ids=__lowerCamelCase ,
        )

        lowercase__ : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
        lowercase__ : Union[str, Any] = model.decode(
            decoder_input_ids[:, -1:] ,
            __lowerCamelCase ,
            decoder_attention_mask=__lowerCamelCase ,
            past_key_values=outputs_cache.past_key_values ,
            decoder_position_ids=__lowerCamelCase ,
        )

        lowercase__ : Dict = model.decode(__lowerCamelCase , __lowerCamelCase )

        lowercase__ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )

    # same cache-equivalence check, but with an explicit padded decoder attention mask
    def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] ):
        lowercase__ : List[Any] = 20
        lowercase__ : Optional[int] = model_class_name(__lowerCamelCase )

        lowercase__ : List[str] = model.encode(inputs_dict["input_ids"] )

        lowercase__ : int = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        lowercase__ : Optional[Any] = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] ,
            axis=-1 ,
        )

        lowercase__ : Tuple = model.init_cache(decoder_input_ids.shape[0] , __lowerCamelCase , __lowerCamelCase )
        lowercase__ : Dict = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,
        )

        lowercase__ : Optional[int] = model.decode(
            decoder_input_ids[:, :-1] ,
            __lowerCamelCase ,
            decoder_attention_mask=__lowerCamelCase ,
            past_key_values=__lowerCamelCase ,
            decoder_position_ids=__lowerCamelCase ,
        )
        lowercase__ : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
        lowercase__ : Dict = model.decode(
            decoder_input_ids[:, -1:] ,
            __lowerCamelCase ,
            past_key_values=outputs_cache.past_key_values ,
            decoder_attention_mask=__lowerCamelCase ,
            decoder_position_ids=__lowerCamelCase ,
        )

        lowercase__ : Dict = model.decode(__lowerCamelCase , __lowerCamelCase , decoder_attention_mask=__lowerCamelCase )

        lowercase__ : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )


# Standalone tests for the LM head and shift_tokens_right with a tiny config.
@require_flax
class snake_case__(unittest.TestCase ):
    """simple docstring"""

    lowercase_ = 9_9

    def snake_case ( self : Optional[Any] ):
        lowercase__ : Optional[Any] = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ] ,
            dtype=np.intaa ,
        )
        lowercase__ : Optional[Any] = input_ids.shape[0]
        lowercase__ : str = BlenderbotConfig(
            vocab_size=self.vocab_size ,
            d_model=24 ,
            encoder_layers=2 ,
            decoder_layers=2 ,
            encoder_attention_heads=2 ,
            decoder_attention_heads=2 ,
            encoder_ffn_dim=32 ,
            decoder_ffn_dim=32 ,
            max_position_embeddings=48 ,
            eos_token_id=2 ,
            pad_token_id=1 ,
            bos_token_id=0 ,
        )
        return config, input_ids, batch_size

    def snake_case ( self : Tuple ):
        # logits should be (batch, seq_len, vocab_size)
        lowercase__ : List[str] = self._get_config_and_data()
        lowercase__ : Optional[Any] = FlaxBlenderbotForConditionalGeneration(__lowerCamelCase )
        lowercase__ : Any = lm_model(input_ids=__lowerCamelCase )
        lowercase__ : str = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape , __lowerCamelCase )

    def snake_case ( self : Tuple ):
        # decoder inputs of a different length than the encoder inputs
        lowercase__ : str = BlenderbotConfig(
            vocab_size=self.vocab_size ,
            d_model=14 ,
            encoder_layers=2 ,
            decoder_layers=2 ,
            encoder_attention_heads=2 ,
            decoder_attention_heads=2 ,
            encoder_ffn_dim=8 ,
            decoder_ffn_dim=8 ,
            max_position_embeddings=48 ,
        )
        lowercase__ : Optional[int] = FlaxBlenderbotForConditionalGeneration(__lowerCamelCase )
        lowercase__ : str = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
        lowercase__ : List[str] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
        lowercase__ : int = lm_model(input_ids=__lowerCamelCase , decoder_input_ids=__lowerCamelCase )
        lowercase__ : List[Any] = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape , __lowerCamelCase )

    def snake_case ( self : Optional[int] ):
        # shift_tokens_right should move the decoder-start token (2) to position 0
        # and reduce the number of pad tokens (1) by one per row
        lowercase__ : int = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
        lowercase__ : int = shift_tokens_right(__lowerCamelCase , 1 , 2 )
        lowercase__ : List[Any] = np.equal(__lowerCamelCase , 1 ).astype(np.floataa ).sum()
        lowercase__ : Dict = np.equal(__lowerCamelCase , 1 ).astype(np.floataa ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(__lowerCamelCase , n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )


# Common-mixin test subclass: wires the tester into the shared Flax model tests
# and adds JIT/no-JIT encode/decode equivalence plus slow pretrained checks.
@require_flax
class snake_case__(lowercase__ , unittest.TestCase , lowercase__ ):
    """simple docstring"""

    lowercase_ = True
    lowercase_ = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    lowercase_ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def snake_case ( self : Optional[Any] ):
        lowercase__ : int = FlaxBlenderbotModelTester(self )

    def snake_case ( self : Dict ):
        lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )

    def snake_case ( self : List[Any] ):
        lowercase__ : int = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )

    def snake_case ( self : Optional[int] ):
        lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                lowercase__ : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
                lowercase__ : Union[str, Any] = model_class(__lowerCamelCase )

                @jax.jit
                def encode_jitted(SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple=None , **SCREAMING_SNAKE_CASE : int ):
                    return model.encode(input_ids=__lowerCamelCase , attention_mask=__lowerCamelCase )

                with self.subTest("JIT Enabled" ):
                    lowercase__ : Optional[Any] = encode_jitted(**__lowerCamelCase ).to_tuple()

                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        lowercase__ : List[Any] = encode_jitted(**__lowerCamelCase ).to_tuple()

                self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
                for jitted_output, output in zip(__lowerCamelCase , __lowerCamelCase ):
                    self.assertEqual(jitted_output.shape , output.shape )

    def snake_case ( self : Optional[int] ):
        lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                lowercase__ : Union[str, Any] = model_class(__lowerCamelCase )
                lowercase__ : str = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )

                lowercase__ : List[Any] = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : str ):
                    return model.decode(
                        decoder_input_ids=__lowerCamelCase ,
                        decoder_attention_mask=__lowerCamelCase ,
                        encoder_outputs=__lowerCamelCase ,
                    )

                with self.subTest("JIT Enabled" ):
                    lowercase__ : Optional[int] = decode_jitted(**__lowerCamelCase ).to_tuple()

                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        lowercase__ : Any = decode_jitted(**__lowerCamelCase ).to_tuple()

                self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
                for jitted_output, output in zip(__lowerCamelCase , __lowerCamelCase ):
                    self.assertEqual(jitted_output.shape , output.shape )

    @slow
    def snake_case ( self : Any ):
        for model_class_name in self.all_model_classes:
            lowercase__ : str = model_class_name.from_pretrained("facebook/blenderbot-400M-distill" )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            lowercase__ : int = np.ones((1, 1) ) * model.config.eos_token_id
            lowercase__ : Optional[int] = model(__lowerCamelCase )
            self.assertIsNotNone(__lowerCamelCase )

    @unittest.skipUnless(jax_device != "cpu" , "3B test too slow on CPU." )
    @slow
    def snake_case ( self : List[Any] ):
        lowercase__ : List[Any] = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        lowercase__ : Any = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        lowercase__ : Dict = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B" , from_pt=__lowerCamelCase )
        lowercase__ : Dict = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B" )

        lowercase__ : Dict = ["Sam"]
        lowercase__ : List[Any] = tokenizer(__lowerCamelCase , return_tensors="jax" )

        lowercase__ : Any = model.generate(**__lowerCamelCase , **__lowerCamelCase )
        lowercase__ : List[Any] = "Sam is a great name. It means \"sun\" in Gaelic."

        lowercase__ : int = tokenizer.batch_decode(__lowerCamelCase , **__lowerCamelCase )
        assert generated_txt[0].strip() == tgt_text
705
# NOTE(review): conversion script: original YOLOS checkpoints -> Hugging Face
# YolosForObjectDetection, with per-variant logit/box sanity checks and optional
# hub upload.  Identifiers were mechanically mangled (all functions renamed to
# `__lowerCamelCase`, assignment targets became `lowercase__`, parameters became
# duplicated `lowerCamelCase__` -- a SyntaxError), so the script cannot run as-is;
# restore the original names before executing.  Code kept byte-identical below;
# only comments added.
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)


# get_yolos_config: builds a YolosConfig for the given variant name and attaches
# the COCO detection id2label mapping fetched from the hub.
def __lowerCamelCase ( lowerCamelCase__ ):
    """simple docstring"""
    lowercase__ : List[str] = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        lowercase__ : Tuple = 192
        lowercase__ : List[Any] = 768
        lowercase__ : Tuple = 12
        lowercase__ : List[str] = 3
        lowercase__ : List[Any] = [800, 1_333]
        lowercase__ : Union[str, Any] = False
    elif yolos_name == "yolos_s_dWr":
        lowercase__ : str = 330
        lowercase__ : List[Any] = 14
        lowercase__ : Tuple = 6
        lowercase__ : Optional[int] = 1_320
    elif "yolos_s" in yolos_name:
        lowercase__ : Dict = 384
        lowercase__ : str = 1_536
        lowercase__ : List[Any] = 12
        lowercase__ : List[Any] = 6
    elif "yolos_b" in yolos_name:
        lowercase__ : int = [800, 1_344]

    lowercase__ : Tuple = 91
    lowercase__ : Optional[int] = "huggingface/label-files"
    lowercase__ : Optional[int] = "coco-detection-id2label.json"
    lowercase__ : Any = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
    lowercase__ : Optional[int] = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
    lowercase__ : List[Any] = idalabel
    lowercase__ : Optional[Any] = {v: k for k, v in idalabel.items()}

    return config


# read_in_q_k_v: splits each fused timm qkv projection into separate q/k/v entries.
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        lowercase__ : Any = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
        lowercase__ : Any = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        lowercase__ : Union[str, Any] = in_proj_weight[: config.hidden_size, :]
        lowercase__ : Union[str, Any] = in_proj_bias[: config.hidden_size]
        lowercase__ : Dict = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        lowercase__ : Any = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        lowercase__ : str = in_proj_weight[-config.hidden_size :, :]
        lowercase__ : Tuple = in_proj_bias[-config.hidden_size :]


# rename_key: maps a single original parameter name to its HF equivalent.
def __lowerCamelCase ( lowerCamelCase__ ):
    """simple docstring"""
    if "backbone" in name:
        lowercase__ : Union[str, Any] = name.replace("backbone" , "vit" )
    if "cls_token" in name:
        lowercase__ : List[str] = name.replace("cls_token" , "embeddings.cls_token" )
    if "det_token" in name:
        lowercase__ : List[str] = name.replace("det_token" , "embeddings.detection_tokens" )
    if "mid_pos_embed" in name:
        lowercase__ : List[Any] = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
    if "pos_embed" in name:
        lowercase__ : Dict = name.replace("pos_embed" , "embeddings.position_embeddings" )
    if "patch_embed.proj" in name:
        lowercase__ : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    if "blocks" in name:
        lowercase__ : int = name.replace("blocks" , "encoder.layer" )
    if "attn.proj" in name:
        lowercase__ : Optional[Any] = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name:
        lowercase__ : Optional[int] = name.replace("attn" , "attention.self" )
    if "norm1" in name:
        lowercase__ : int = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        lowercase__ : int = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        lowercase__ : List[str] = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        lowercase__ : Union[str, Any] = name.replace("mlp.fc2" , "output.dense" )
    if "class_embed" in name:
        lowercase__ : int = name.replace("class_embed" , "class_labels_classifier" )
    if "bbox_embed" in name:
        lowercase__ : Optional[int] = name.replace("bbox_embed" , "bbox_predictor" )
    if "vit.norm" in name:
        lowercase__ : Optional[Any] = name.replace("vit.norm" , "vit.layernorm" )

    return name


# convert_state_dict: renames every key and splits qkv tensors for the HF model.
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        lowercase__ : List[Any] = orig_state_dict.pop(lowerCamelCase__ )

        if "qkv" in key:
            lowercase__ : Dict = key.split("." )
            lowercase__ : List[Any] = int(key_split[2] )
            lowercase__ : Optional[int] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                lowercase__ : str = val[:dim, :]
                lowercase__ : int = val[
                    dim : dim * 2, :
                ]
                lowercase__ : str = val[-dim:, :]
            else:
                lowercase__ : Tuple = val[:dim]
                lowercase__ : Any = val[dim : dim * 2]
                lowercase__ : Optional[Any] = val[-dim:]
        else:
            lowercase__ : Optional[Any] = val

    return orig_state_dict


# prepare_img: downloads the standard COCO test image used for output checks.
def __lowerCamelCase ( ):
    """simple docstring"""
    lowercase__ : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg"
    lowercase__ : List[str] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
    return im


# convert_yolos_checkpoint: full conversion pipeline with per-variant expected
# logits/boxes asserted at atol=1e-4 before saving (and optionally pushing).
@torch.no_grad()
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
    """simple docstring"""
    lowercase__ : List[Any] = get_yolos_config(lowerCamelCase__ )

    # load original state_dict
    lowercase__ : Dict = torch.load(lowerCamelCase__ , map_location="cpu" )["model"]

    # load 🤗 model
    lowercase__ : Dict = YolosForObjectDetection(lowerCamelCase__ )
    model.eval()
    lowercase__ : int = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
    model.load_state_dict(lowerCamelCase__ )

    # Check outputs on an image, prepared by YolosImageProcessor
    lowercase__ : Dict = 800 if yolos_name != "yolos_ti" else 512
    lowercase__ : Optional[Any] = YolosImageProcessor(format="coco_detection" , size=lowerCamelCase__ )
    lowercase__ : int = image_processor(images=prepare_img() , return_tensors="pt" )
    lowercase__ : int = model(**lowerCamelCase__ )
    lowercase__ , lowercase__ : int = outputs.logits, outputs.pred_boxes

    lowercase__ , lowercase__ : int = None, None
    if yolos_name == "yolos_ti":
        lowercase__ : Optional[int] = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
        lowercase__ : Dict = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
    elif yolos_name == "yolos_s_200_pre":
        lowercase__ : Any = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
        lowercase__ : List[str] = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
    elif yolos_name == "yolos_s_300_pre":
        lowercase__ : Dict = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
        lowercase__ : Tuple = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
    elif yolos_name == "yolos_s_dWr":
        lowercase__ : Optional[Any] = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
        lowercase__ : int = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
    elif yolos_name == "yolos_base":
        lowercase__ : List[str] = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
        lowercase__ : List[str] = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
    else:
        raise ValueError(F"""Unknown yolos_name: {yolos_name}""" )

    assert torch.allclose(logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )

    Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
    print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(lowerCamelCase__ )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(lowerCamelCase__ )

    if push_to_hub:
        lowercase__ : Tuple = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub..." )
        lowercase__ : Optional[int] = model_mapping[yolos_name]
        image_processor.push_to_hub(lowerCamelCase__ , organization="hustvl" )
        model.push_to_hub(lowerCamelCase__ , organization="hustvl" )


if __name__ == "__main__":
    lowerCAmelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--yolos_name''',
        default='''yolos_s_200_pre''',
        type=str,
        help=(
            '''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
            ''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
        ),
    )
    parser.add_argument(
        '''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
    )

    lowerCAmelCase__ = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
81
0
from torch import nn class snake_case__(nn.Module ): """simple docstring""" def __init__( self : Dict , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ): super().__init__() lowercase__ : Dict = class_size lowercase__ : Tuple = embed_size # self.mlp1 = nn.Linear(embed_size, embed_size) # self.mlp2 = (nn.Linear(embed_size, class_size)) lowercase__ : Dict = nn.Linear(UpperCAmelCase_ , UpperCAmelCase_ ) def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Union[str, Any] ): # hidden_state = nn.functional.relu(self.mlp1(hidden_state)) # hidden_state = self.mlp2(hidden_state) lowercase__ : int = self.mlp(UpperCAmelCase_ ) return logits
706
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ = { '''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''], '''processing_mgp_str''': ['''MgpstrProcessor'''], '''tokenization_mgp_str''': ['''MgpstrTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MgpstrModel''', '''MgpstrPreTrainedModel''', '''MgpstrForSceneTextRecognition''', ] if TYPE_CHECKING: from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mgp_str import ( MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
81
0
import json
import os
import unittest

from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
    VOCAB_FILES_NAMES,
    GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class snake_case__(UpperCAmelCase_ , unittest.TestCase ):
    # NOTE(review): local names and several references in this class look
    # machine-mangled (values assigned to `lowercase__` are later read under
    # other names such as `vocab_tokens` / `tokenizer` / `prefix_text`, and
    # every method is named `snake_case`, so later defs shadow earlier ones).
    # The base class `UpperCAmelCase_` is undefined here; it presumably should
    # be the imported `TokenizerTesterMixin` (otherwise that import is unused)
    # -- confirm against the upstream test file.
    """Test-suite for GPTSanJapaneseTokenizer using a tiny on-disk vocab/emoji fixture."""

    lowercase_ = GPTSanJapaneseTokenizer
    lowercase_ = False
    lowercase_ = {'do_clean_text': False, 'add_prefix_space': False}

    def snake_case ( self : str ):
        # setUp: write a minimal vocabulary file and emoji mapping into tmpdirname.
        super().setUp()
        # fmt: off
        lowercase__ : Optional[int] = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
        # fmt: on
        lowercase__ : str = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}}  # 😀
        lowercase__ : str = {'unk_token': '<unk>'}
        lowercase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["emoji_file"] )
        # One token per line in the vocab file; emoji map is plain JSON.
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        with open(self.emoji_file , "w" ) as emoji_writer:
            emoji_writer.write(json.dumps(_lowercase ) )

    def snake_case ( self : str , **SCREAMING_SNAKE_CASE : Optional[Any] ):
        # Build a tokenizer from the fixture directory, honoring special_tokens_map.
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **_lowercase )

    def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any] ):
        # Input/expected pair: '㔺界' normalizes to '世界' on decode.
        lowercase__ : Optional[int] = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
        lowercase__ : Dict = 'こんにちは、世界。 \nこんばんは、世界。😀'
        return input_text, output_text

    def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] ):
        # Round-trip helper: encode without special tokens, decode without cleanup.
        lowercase__ : Dict = self.get_input_output_texts(_lowercase )
        lowercase__ : List[Any] = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
        lowercase__ : Optional[Any] = tokenizer.decode(_lowercase , clean_up_tokenization_spaces=_lowercase )
        return text, ids

    def snake_case ( self : str ):
        pass  # TODO add if relevant

    def snake_case ( self : Union[str, Any] ):
        pass  # TODO add if relevant

    def snake_case ( self : Dict ):
        pass  # TODO add if relevant

    def snake_case ( self : Any ):
        # Tokenize / convert_tokens_to_ids round-trips on the tiny fixture vocab.
        lowercase__ : Union[str, Any] = self.get_tokenizer()
        # Testing tokenization
        lowercase__ : Any = 'こんにちは、世界。 こんばんは、㔺界。'
        lowercase__ : Optional[Any] = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
        lowercase__ : List[Any] = tokenizer.tokenize(_lowercase )
        self.assertListEqual(_lowercase , _lowercase )
        # Testing conversion to ids without special tokens
        lowercase__ : Dict = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        lowercase__ : List[Any] = tokenizer.convert_tokens_to_ids(_lowercase )
        self.assertListEqual(_lowercase , _lowercase )
        # Testing conversion to ids with special tokens
        lowercase__ : List[str] = tokens + [tokenizer.unk_token]
        lowercase__ : List[str] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        lowercase__ : Dict = tokenizer.convert_tokens_to_ids(_lowercase )
        self.assertListEqual(_lowercase , _lowercase )

    def snake_case ( self : List[Any] ):
        # <|bagoftoken|> expands to repeated tokens on encode/decode.
        lowercase__ : Any = self.get_tokenizer()
        # Testing tokenization
        lowercase__ : Optional[Any] = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
        lowercase__ : str = 'こんにちは、、、、世界。こんばんは、、、、世界。'
        lowercase__ : Optional[int] = tokenizer.encode(_lowercase )
        lowercase__ : int = tokenizer.decode(_lowercase )
        self.assertEqual(_lowercase , _lowercase )

    @slow
    def snake_case ( self : str ):
        # prefix_text handling must be equivalent to simple concatenation after decode.
        lowercase__ : List[Any] = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
        # Testing tokenization
        lowercase__ : Tuple = 'こんにちは、世界。'
        lowercase__ : Optional[int] = 'こんばんは、㔺界。😀'
        lowercase__ : Optional[int] = 'こんにちは、世界。こんばんは、世界。😀'
        lowercase__ : Union[str, Any] = tokenizer.encode(prefix_text + input_text )
        lowercase__ : str = tokenizer.encode("" , prefix_text=prefix_text + input_text )
        lowercase__ : Union[str, Any] = tokenizer.encode(_lowercase , prefix_text=_lowercase )
        lowercase__ : Optional[int] = tokenizer.decode(_lowercase )
        lowercase__ : List[Any] = tokenizer.decode(_lowercase )
        lowercase__ : List[Any] = tokenizer.decode(_lowercase )
        self.assertEqual(_lowercase , _lowercase )
        self.assertEqual(_lowercase , _lowercase )
        self.assertEqual(_lowercase , _lowercase )

    @slow
    def snake_case ( self : Optional[Any] ):
        # token_type_ids must mark the prefix segment regardless of how it is supplied.
        lowercase__ : str = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
        # Testing tokenization
        lowercase__ : Tuple = 'こんにちは、世界。'
        lowercase__ : str = 'こんばんは、㔺界。😀'
        lowercase__ : int = len(tokenizer.encode(_lowercase ) ) - 2
        lowercase__ : str = len(tokenizer.encode(_lowercase ) ) - 2
        lowercase__ : str = [1] + [0] * (len_prefix + len_text + 1)
        lowercase__ : Optional[Any] = [1] * (len_prefix + len_text + 1) + [0]
        lowercase__ : Union[str, Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        lowercase__ : Optional[int] = tokenizer(prefix_text + input_text ).token_type_ids
        lowercase__ : Optional[int] = tokenizer("" , prefix_text=prefix_text + input_text ).token_type_ids
        lowercase__ : List[Any] = tokenizer(_lowercase , prefix_text=_lowercase ).token_type_ids
        self.assertListEqual(_lowercase , _lowercase )
        self.assertListEqual(_lowercase , _lowercase )
        self.assertListEqual(_lowercase , _lowercase )

    @slow
    def snake_case ( self : Tuple ):
        # The SEG token position moves with the prefix split, content stays equal.
        lowercase__ : List[Any] = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
        lowercase__ : Any = tokenizer.encode("あンいワ" )
        lowercase__ : Union[str, Any] = tokenizer.encode("" , prefix_text="あンいワ" )
        lowercase__ : List[str] = tokenizer.encode("いワ" , prefix_text="あン" )
        self.assertEqual(tokenizer.decode(_lowercase ) , tokenizer.decode(_lowercase ) )
        self.assertEqual(tokenizer.decode(_lowercase ) , tokenizer.decode(_lowercase ) )
        self.assertNotEqual(_lowercase , _lowercase )
        self.assertNotEqual(_lowercase , _lowercase )
        self.assertEqual(x_token_a[1] , x_token_a[-1] )  # SEG token
        self.assertEqual(x_token_a[1] , x_token_a[3] )  # SEG token

    @slow
    def snake_case ( self : Dict ):
        # Batched (text, prefix) pairs: check padded ids, token_type_ids, attention_mask.
        lowercase__ : Optional[Any] = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
        lowercase__ : Union[str, Any] = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
        lowercase__ : Optional[int] = tokenizer(_lowercase , padding=_lowercase )
        lowercase__ : List[str] = tokenizer.batch_encode_plus(_lowercase , padding=_lowercase )
        # fmt: off
        lowercase__ : Optional[Any] = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
        lowercase__ : str = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        lowercase__ : List[Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids , _lowercase )
        self.assertListEqual(x_token.token_type_ids , _lowercase )
        self.assertListEqual(x_token.attention_mask , _lowercase )
        self.assertListEqual(x_token_a.input_ids , _lowercase )
        self.assertListEqual(x_token_a.token_type_ids , _lowercase )
        self.assertListEqual(x_token_a.attention_mask , _lowercase )

    def snake_case ( self : Dict ):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def snake_case ( self : Dict ):
        # tokenizer has no padding token
        pass
707
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available

if is_vision_available():
    from PIL import Image

    from transformers import CLIPImageProcessor, CLIPProcessor


@require_vision
class snake_case__(unittest.TestCase ):
    # NOTE(review): local names here look machine-mangled (values assigned to
    # `lowercase__` and parameters named `SCREAMING_SNAKE_CASE` are later read
    # under other names), and every method is named `snake_case` so later defs
    # shadow earlier ones -- compare against the upstream CLIP processor tests.
    """Tests that CLIPProcessor round-trips tokenizer + image-processor save/load
    and delegates text/image encoding and decoding correctly."""

    def snake_case ( self : Optional[Any] ):
        # setUp: write a tiny BPE vocab, merges file and image-processor config to a temp dir.
        lowercase__ : Dict = tempfile.mkdtemp()
        # fmt: off
        lowercase__ : Any = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        lowercase__ : Dict = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
        lowercase__ : Tuple = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        lowercase__ : Tuple = {"unk_token": "<unk>"}
        lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(SCREAMING_SNAKE_CASE ) )
        lowercase__ : Tuple = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
            "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
        }
        lowercase__ : Optional[Any] = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
        # Slow tokenizer built from the fixture directory.
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )

    def snake_case ( self : Optional[int] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
        # Fast (Rust) tokenizer built from the same fixture directory.
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )

    def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Dict ):
        # Image processor built from the fixture config.
        return CLIPImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )

    def snake_case ( self : Union[str, Any] ):
        # tearDown: remove the fixture directory.
        shutil.rmtree(self.tmpdirname )

    def snake_case ( self : Any ):
        # One random 3x30x400 image as a PIL Image.
        # NOTE(review): `np.uinta` is not a numpy attribute -- presumably a
        # mangled `np.uint8`; confirm before running.
        lowercase__ : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        lowercase__ : str = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def snake_case ( self : int ):
        # save_pretrained / from_pretrained round-trip for slow and fast processors.
        lowercase__ : Optional[int] = self.get_tokenizer()
        lowercase__ : List[Any] = self.get_rust_tokenizer()
        lowercase__ : List[str] = self.get_image_processor()
        lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
        processor_slow.save_pretrained(self.tmpdirname )
        lowercase__ : Dict = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE )
        lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
        processor_fast.save_pretrained(self.tmpdirname )
        lowercase__ : Tuple = CLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE )
        self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE )
        self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE )

    def snake_case ( self : List[str] ):
        # from_pretrained must honor extra kwargs (new special tokens, image-processor overrides).
        lowercase__ : Any = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        lowercase__ : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        lowercase__ : int = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
        lowercase__ : Union[str, Any] = CLIPProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE )

    def snake_case ( self : str ):
        # Processing images through the processor must match the raw image processor.
        lowercase__ : int = self.get_image_processor()
        lowercase__ : Optional[Any] = self.get_tokenizer()
        lowercase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
        lowercase__ : Any = self.prepare_image_inputs()
        lowercase__ : List[Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="np" )
        lowercase__ : Optional[int] = processor(images=SCREAMING_SNAKE_CASE , return_tensors="np" )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def snake_case ( self : str ):
        # Processing text through the processor must match the raw tokenizer.
        lowercase__ : Tuple = self.get_image_processor()
        lowercase__ : Any = self.get_tokenizer()
        lowercase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
        lowercase__ : int = "lower newer"
        lowercase__ : Dict = processor(text=SCREAMING_SNAKE_CASE )
        lowercase__ : int = tokenizer(SCREAMING_SNAKE_CASE )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def snake_case ( self : Union[str, Any] ):
        # text+image call yields the combined keys; empty call raises.
        lowercase__ : Optional[int] = self.get_image_processor()
        lowercase__ : Tuple = self.get_tokenizer()
        lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
        lowercase__ : List[Any] = "lower newer"
        lowercase__ : str = self.prepare_image_inputs()
        lowercase__ : int = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
        # test if it raises when no input is passed
        with pytest.raises(SCREAMING_SNAKE_CASE ):
            processor()

    def snake_case ( self : Optional[Any] ):
        # batch_decode must delegate to the tokenizer.
        lowercase__ : Dict = self.get_image_processor()
        lowercase__ : Optional[Any] = self.get_tokenizer()
        lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
        lowercase__ : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        lowercase__ : Any = processor.batch_decode(SCREAMING_SNAKE_CASE )
        lowercase__ : Any = tokenizer.batch_decode(SCREAMING_SNAKE_CASE )
        self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    def snake_case ( self : str ):
        # Output keys of a combined call must equal processor.model_input_names.
        lowercase__ : List[str] = self.get_image_processor()
        lowercase__ : List[str] = self.get_tokenizer()
        lowercase__ : Union[str, Any] = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
        lowercase__ : Any = "lower newer"
        lowercase__ : Union[str, Any] = self.prepare_image_inputs()
        lowercase__ : int = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
81
0
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple

import numpy as np

from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

# NOTE(review): names in this file appear machine-mangled: module constants
# are all bound to `lowerCAmelCase__` (each rebinding shadows the previous),
# the loader below is named `__lowerCamelCase` but the tokenizer calls
# `load_vocab_and_emoji`, both classes are named `snake_case__` (the second
# shadows the first), the base `__A` is undefined, and locals assigned to
# `lowercase__` are read back under other names.  Compare against the
# upstream tokenization_gpt_neox_japanese.py before relying on behavior.

lowerCAmelCase__ = logging.get_logger(__name__)

# Expected vocabulary / emoji file names inside a checkpoint directory.
lowerCAmelCase__ = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}

lowerCAmelCase__ = {
    '''vocab_file''': {
        '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
    },
    '''emoji_file''': {
        '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
    },
}

# Maximum model input length for the published checkpoint.
lowerCAmelCase__ = {
    '''abeja/gpt-neox-japanese-2.7b''': 2_0_4_8,
}


def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
    # Load vocab + emoji files.  A vocab line may contain several
    # comma-separated surface forms that all map to the same id.
    # NOTE(review): both parameters share one mangled name (SyntaxError as
    # written) and the body reads the undefined `__A` -- presumably the
    # vocab-file and emoji-file paths.
    """simple docstring"""
    with open(__A , "r" , encoding="utf-8" ) as f:
        lowercase__ : List[Any] = json.loads(f.read() )
    lowercase__ : Any = collections.OrderedDict()
    lowercase__ : Dict = collections.OrderedDict()
    lowercase__ : List[str] = collections.OrderedDict()
    with open(__A , "r" , encoding="utf-8" ) as f:
        lowercase__ : List[Any] = f.readlines()
    # A line that is exactly ',' (or has no comma) is a single token; otherwise split on ','.
    lowercase__ : List[Any] = [[t.rstrip("\n" )] if (t == ''',''' or ''',''' not in t) else t.rstrip("\n" ).split("," ) for t in token]
    for idx, b in enumerate(__A ):
        lowercase__ : Optional[int] = b
        lowercase__ : Any = idx
        for wd in b:
            lowercase__ : Any = idx
    return vocab, raw_vocab, ids_to_tokens, emoji


class snake_case__(__A ):
    """GPT-NeoX Japanese tokenizer: delegates sub-word splitting to the
    SubWordJapaneseTokenizer defined below."""

    lowercase_ = VOCAB_FILES_NAMES
    lowercase_ = PRETRAINED_VOCAB_FILES_MAP
    lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowercase_ = ["""input_ids""", """attention_mask"""]

    def __init__( self : Any , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[Any]="<|endoftext|>" , SCREAMING_SNAKE_CASE : str="<|endoftext|>" , SCREAMING_SNAKE_CASE : List[Any]="<|startoftext|>" , SCREAMING_SNAKE_CASE : Any="<|endoftext|>" , SCREAMING_SNAKE_CASE : List[Any]=False , **SCREAMING_SNAKE_CASE : Tuple , ):
        # Validate both files exist, then load the vocab and build the sub-word tokenizer.
        super().__init__(
            unk_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , do_clean_text=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
        if not os.path.isfile(SCREAMING_SNAKE_CASE ):
            raise ValueError(
                f"""Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained"""
                " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
        if not os.path.isfile(SCREAMING_SNAKE_CASE ):
            raise ValueError(
                f"""Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google"""
                " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
        lowercase__ : str = do_clean_text
        lowercase__ : int = load_vocab_and_emoji(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[int] = SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )

    @property
    def snake_case ( self : List[str] ):
        # vocab_size: number of raw (un-merged) vocabulary entries.
        return len(self.raw_vocab )

    def snake_case ( self : Tuple ):
        # get_vocab: raw vocab plus any added tokens.
        return dict(self.raw_vocab , **self.added_tokens_encoder )

    def snake_case ( self : str , SCREAMING_SNAKE_CASE : List[str] ):
        # _tokenize: delegate to the sub-word tokenizer; honors do_clean_text.
        return self.subword_tokenizer.tokenize(SCREAMING_SNAKE_CASE , clean=self.do_clean_text )

    def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] ):
        # token -> id, falling back to the unk token id.
        return self.vocab.get(SCREAMING_SNAKE_CASE , self.vocab.get(self.unk_token ) )

    def snake_case ( self : int , SCREAMING_SNAKE_CASE : List[str] ):
        # id -> token (byte tokens and emoji are resolved by the sub-word tokenizer).
        return self.subword_tokenizer.convert_id_to_token(SCREAMING_SNAKE_CASE )

    def snake_case ( self : str , SCREAMING_SNAKE_CASE : Optional[Any] ):
        # tokens -> single string (simple concatenation; Japanese has no spaces).
        lowercase__ : str = ''''''.join(SCREAMING_SNAKE_CASE ).strip()
        return out_string

    def snake_case ( self : Any , SCREAMING_SNAKE_CASE : "Conversation" ):
        # Build conversation input ids, truncating to the most recent model_max_length tokens.
        lowercase__ : List[str] = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE ) + [self.eos_token_id] )
        if len(SCREAMING_SNAKE_CASE ) > self.model_max_length:
            lowercase__ : Dict = input_ids[-self.model_max_length :]
        return input_ids

    def snake_case ( self : Any , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[str] = None ):
        # save_vocabulary: write vocab (comma-joined surface forms per line) and emoji JSON.
        lowercase__ : Optional[Any] = 0
        if os.path.isdir(SCREAMING_SNAKE_CASE ):
            lowercase__ : Dict = os.path.join(
                SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
            lowercase__ : Union[str, Any] = os.path.join(
                SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
        else:
            lowercase__ : int = (
                (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''vocab_file''']
            )
            lowercase__ : Union[str, Any] = (
                (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''emoji_file''']
            )
        with open(SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                # Warn if ids are not consecutive (corrupted vocab).
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        " Please check that the vocabulary is not corrupted!" )
                    lowercase__ : str = token_index
                writer.write(",".join(SCREAMING_SNAKE_CASE ) + "\n" )
                index += 1
        with open(SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as writer:
            json.dump(self.emoji , SCREAMING_SNAKE_CASE )
        return vocab_file, emoji_file


class snake_case__(__A ):
    """Rule-based Japanese sub-word tokenizer: longest-match against the vocab,
    with special handling for URLs/emails/dates/prices, box-drawing characters,
    emoji, and a UTF-8 byte fallback."""

    def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
        # Precompile the content-normalization regexes and the block/keisen translation table.
        lowercase__ : Optional[int] = vocab  # same as swe
        lowercase__ : Dict = ids_to_tokens  # same as bpe
        lowercase__ : Dict = emoji
        # Longest surface form in the vocab bounds the match window.
        lowercase__ : List[Any] = np.max([len(SCREAMING_SNAKE_CASE ) for w in self.vocab.keys()] )
        lowercase__ : Tuple = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
        lowercase__ : Union[str, Any] = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
        lowercase__ : Optional[Any] = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
        lowercase__ : int = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
        lowercase__ : str = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
        lowercase__ : Union[str, Any] = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
        # Box-drawing ("keisen") and block-element characters all collapse to <BLOCK>.
        lowercase__ : List[Any] = '''─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'''
        lowercase__ : Optional[int] = '''▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'''
        lowercase__ : Any = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )

    def __len__( self : Union[str, Any] ):
        return len(self.ids_to_tokens )

    def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : str ):
        # clean_text: replace URLs/emails/phones/dates/prices with placeholder
        # tokens and collapse runs of <BLOCK>.
        # NOTE(review): all six substitutions read the same mangled attribute
        # `content_repattera`; upstream uses six distinct compiled patterns.
        lowercase__ : str = self.content_repattera.sub("<URL>" , SCREAMING_SNAKE_CASE )
        lowercase__ : int = self.content_repattera.sub("<EMAIL>" , SCREAMING_SNAKE_CASE )
        lowercase__ : List[Any] = self.content_repattera.sub("<TEL>" , SCREAMING_SNAKE_CASE )
        lowercase__ : List[Any] = self.content_repattera.sub("<DATE>" , SCREAMING_SNAKE_CASE )
        lowercase__ : Dict = self.content_repattera.sub("<DATE>" , SCREAMING_SNAKE_CASE )
        lowercase__ : Any = self.content_repattera.sub("<PRICE>" , SCREAMING_SNAKE_CASE )
        lowercase__ : Union[str, Any] = content.translate(self.content_transa )
        while "<BLOCK><BLOCK>" in content:
            lowercase__ : Dict = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" )
        return content

    def snake_case ( self : int , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple=False ):
        # tokenize: normalize whitespace/newlines/dashes to marker tokens,
        # substitute known emoji, optionally clean, then greedy longest-match
        # against the vocab with <KIGOU>/<U2000U2BFF>/byte fallbacks.
        lowercase__ : Dict = text.replace(" " , "<SP>" )
        lowercase__ : Optional[int] = text.replace(" " , "<SP>" )  # full-width space
        lowercase__ : List[Any] = text.replace("\r\n" , "<BR>" )
        lowercase__ : Optional[int] = text.replace("\n" , "<BR>" )
        lowercase__ : Tuple = text.replace("\r" , "<BR>" )
        lowercase__ : Any = text.replace("\t" , "<TAB>" )
        lowercase__ : Dict = text.replace("—" , "ー" )
        lowercase__ : Dict = text.replace("−" , "ー" )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                lowercase__ : int = text.replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        if clean:
            lowercase__ : Any = self.clean_text(SCREAMING_SNAKE_CASE )

        def check_simbol(SCREAMING_SNAKE_CASE : Optional[Any] ):
            # True for single characters in selected 2-byte UTF-8 symbol ranges.
            lowercase__ : List[str] = x.encode()
            if len(SCREAMING_SNAKE_CASE ) == 1 and len(SCREAMING_SNAKE_CASE ) == 2:
                lowercase__ : Any = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0xC_2_A_1 and c <= 0xC_2_B_F)
                    or (c >= 0xC_7_8_0 and c <= 0xC_7_8_3)
                    or (c >= 0xC_A_B_9 and c <= 0xC_B_B_F)
                    or (c >= 0xC_C_8_0 and c <= 0xC_D_A_2)
                ):
                    return True
            return False

        def checkuae(SCREAMING_SNAKE_CASE : str ):
            # True for single characters whose 3-byte UTF-8 encoding falls in U+2000..U+2BFF.
            lowercase__ : Tuple = x.encode()
            if len(SCREAMING_SNAKE_CASE ) == 1 and len(SCREAMING_SNAKE_CASE ) == 3:
                lowercase__ : Dict = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0xE_2_8_0_8_0 and c <= 0xE_2_B_0_7_F:
                    return True
            return False

        lowercase__ : Dict = 0
        lowercase__ : Tuple = []
        while pos < len(SCREAMING_SNAKE_CASE ):
            # Marker tokens start with '<' and may be up to maxlen+1 long; plain text windows are 3 chars.
            lowercase__ : str = min(len(SCREAMING_SNAKE_CASE ) , pos + self.maxlen + 1 ) if text[pos] == '''<''' else pos + 3
            lowercase__ : List[Any] = []  # (token_id, token, pos)
            for e in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , -1 ):
                lowercase__ : Any = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(SCREAMING_SNAKE_CASE ) > 2:
                        # A marker token matches exactly; take it immediately.
                        lowercase__ : Tuple = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(SCREAMING_SNAKE_CASE ) > 0:
                # the smallest token_id is adopted
                lowercase__ : Union[str, Any] = sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x[0] )[0]
                result.append(SCREAMING_SNAKE_CASE )
                lowercase__ : int = e
            else:
                # No vocab match: classify the single character or fall back to raw bytes.
                lowercase__ : Optional[Any] = pos + 1
                lowercase__ : str = text[pos:end]
                if check_simbol(SCREAMING_SNAKE_CASE ):
                    result.append("<KIGOU>" )
                elif checkuae(SCREAMING_SNAKE_CASE ):
                    result.append("<U2000U2BFF>" )
                else:
                    for i in wd.encode("utf-8" ):
                        result.append("<|byte%d|>" % i )
            lowercase__ : Union[str, Any] = end
        return result

    def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any]="\n" ):
        # convert_id_to_token: resolve byte tokens (accumulated and decoded as
        # UTF-8), emoji tokens, and the marker tokens back to text.
        lowercase__ : List[str] = []
        lowercase__ : Any = []
        lowercase__ : Any = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2] ) )
        else:
            if len(SCREAMING_SNAKE_CASE ) > 0:
                # Flush any pending byte run before emitting a regular token.
                words.append(bytearray(SCREAMING_SNAKE_CASE ).decode("utf-8" , errors="replace" ) )
                lowercase__ : Tuple = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word] )
            elif word == "<SP>":
                words.append(" " )
            elif word == "<BR>":
                words.append(SCREAMING_SNAKE_CASE )
            elif word == "<TAB>":
                words.append("\t" )
            elif word == "<BLOCK>":
                words.append("▀" )
            elif word == "<KIGOU>":
                words.append("ǀ" )
            elif word == "<U2000U2BFF>":
                words.append("‖" )
            else:
                words.append(SCREAMING_SNAKE_CASE )
        if len(SCREAMING_SNAKE_CASE ) > 0:
            words.append(bytearray(SCREAMING_SNAKE_CASE ).decode("utf-8" , errors="replace" ) )
        lowercase__ : Union[str, Any] = ''''''.join(SCREAMING_SNAKE_CASE )
        return text
708
import unittest
from queue import Empty
from threading import Thread

from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM


@require_torch
class snake_case__(unittest.TestCase ):
    # NOTE(review): local names look machine-mangled -- values assigned to
    # `lowercase__` are later read as `tokenizer` / `model` / `input_ids` /
    # `streamer`, and `.to(SCREAMING_SNAKE_CASE)` presumably should be
    # `.to(torch_device)` (otherwise that import is unused).  Compare with the
    # upstream streamer tests.
    """Tests that TextStreamer / TextIteratorStreamer emit exactly the greedy
    generation text, honor skip_prompt / decode kwargs, and time out."""

    def snake_case ( self : int ):
        # Streaming to stdout must reproduce the greedy-decode text (minus trailing "\n").
        lowercase__ : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        lowercase__ : Dict = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : str = -1
        lowercase__ : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
        lowercase__ : Dict = tokenizer.decode(greedy_ids[0] )
        with CaptureStdout() as cs:
            lowercase__ : str = TextStreamer(SCREAMING_SNAKE_CASE )
            model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        lowercase__ : int = cs.out[:-1]
        self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    def snake_case ( self : Optional[int] ):
        # Iterating a TextIteratorStreamer (generation on a worker thread)
        # must yield the same text as greedy decode.
        lowercase__ : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        lowercase__ : str = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[Any] = -1
        lowercase__ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[int] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
        lowercase__ : int = tokenizer.decode(greedy_ids[0] )
        lowercase__ : Union[str, Any] = TextIteratorStreamer(SCREAMING_SNAKE_CASE )
        lowercase__ : Dict = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        lowercase__ : Optional[int] = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE )
        thread.start()
        lowercase__ : List[Any] = ""
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    def snake_case ( self : Union[str, Any] ):
        # skip_prompt=True must stream only the newly generated continuation.
        lowercase__ : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        lowercase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : Union[str, Any] = -1
        lowercase__ : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
        lowercase__ : Any = greedy_ids[:, input_ids.shape[1] :]
        lowercase__ : Any = tokenizer.decode(new_greedy_ids[0] )
        with CaptureStdout() as cs:
            lowercase__ : str = TextStreamer(SCREAMING_SNAKE_CASE , skip_prompt=SCREAMING_SNAKE_CASE )
            model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        lowercase__ : Optional[Any] = cs.out[:-1]
        self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    def snake_case ( self : Any ):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        lowercase__ : List[str] = AutoTokenizer.from_pretrained("distilgpt2" )
        lowercase__ : Tuple = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : List[Any] = -1
        lowercase__ : List[Any] = torch.ones((1, 5) , device=SCREAMING_SNAKE_CASE ).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            lowercase__ : Dict = TextStreamer(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
            model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=1 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        lowercase__ : List[Any] = cs.out[:-1]  # Remove the final "\n"
        lowercase__ : Optional[int] = tokenizer(SCREAMING_SNAKE_CASE , return_tensors="pt" )
        self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )

    def snake_case ( self : Optional[int] ):
        # A tiny timeout must raise (queue.Empty) while the worker thread is still generating.
        lowercase__ : Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        lowercase__ : List[str] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : int = -1
        lowercase__ : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : List[Any] = TextIteratorStreamer(SCREAMING_SNAKE_CASE , timeout=0.001 )
        lowercase__ : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        lowercase__ : Any = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE )
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(SCREAMING_SNAKE_CASE ):
            lowercase__ : List[str] = ""
            for new_text in streamer:
                streamer_text += new_text
81
0
from __future__ import annotations def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowercase__ : Dict = list(range(len(__lowerCAmelCase ) ) ) lowercase__ : List[Any] = [v / w for v, w in zip(__lowerCAmelCase , __lowerCAmelCase )] index.sort(key=lambda lowerCamelCase__ : ratio[i] , reverse=__lowerCAmelCase ) lowercase__ : Optional[Any] = 0 lowercase__ : Any = [0] * len(__lowerCAmelCase ) for i in index: if weight[i] <= capacity: lowercase__ : int = 1 max_value += value[i] capacity -= weight[i] else: lowercase__ : int = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
709
from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class snake_case__(_UpperCamelCase ): """simple docstring""" lowercase_ = 42 class snake_case__(nn.Module ): """simple docstring""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : Optional[int]=3 , SCREAMING_SNAKE_CASE : List[Any]=("DownEncoderBlock2D",) , SCREAMING_SNAKE_CASE : Dict=(64,) , SCREAMING_SNAKE_CASE : Optional[Any]=2 , SCREAMING_SNAKE_CASE : Optional[int]=32 , SCREAMING_SNAKE_CASE : List[str]="silu" , SCREAMING_SNAKE_CASE : str=True , ): super().__init__() lowercase__ : str = layers_per_block lowercase__ : int = torch.nn.Convad( SCREAMING_SNAKE_CASE , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , ) lowercase__ : Union[str, Any] = None lowercase__ : Optional[int] = nn.ModuleList([] ) # down lowercase__ : Dict = block_out_channels[0] for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE ): lowercase__ : List[str] = output_channel lowercase__ : Dict = block_out_channels[i] lowercase__ : List[str] = i == len(SCREAMING_SNAKE_CASE ) - 1 lowercase__ : Union[str, Any] = get_down_block( SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block , in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=SCREAMING_SNAKE_CASE , resnet_groups=SCREAMING_SNAKE_CASE , attention_head_dim=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , ) self.down_blocks.append(SCREAMING_SNAKE_CASE ) # mid lowercase__ : Optional[int] = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="default" , 
attention_head_dim=block_out_channels[-1] , resnet_groups=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , ) # out lowercase__ : int = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=SCREAMING_SNAKE_CASE , eps=1E-6 ) lowercase__ : Union[str, Any] = nn.SiLU() lowercase__ : Tuple = 2 * out_channels if double_z else out_channels lowercase__ : Tuple = nn.Convad(block_out_channels[-1] , SCREAMING_SNAKE_CASE , 3 , padding=1 ) lowercase__ : Tuple = False def snake_case ( self : int , SCREAMING_SNAKE_CASE : Tuple ): lowercase__ : List[str] = x lowercase__ : Tuple = self.conv_in(SCREAMING_SNAKE_CASE ) if self.training and self.gradient_checkpointing: def create_custom_forward(SCREAMING_SNAKE_CASE : Union[str, Any] ): def custom_forward(*SCREAMING_SNAKE_CASE : Dict ): return module(*SCREAMING_SNAKE_CASE ) return custom_forward # down if is_torch_version(">=" , "1.11.0" ): for down_block in self.down_blocks: lowercase__ : Union[str, Any] = torch.utils.checkpoint.checkpoint( create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE ) # middle lowercase__ : int = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE ) else: for down_block in self.down_blocks: lowercase__ : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) # middle lowercase__ : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE ) else: # down for down_block in self.down_blocks: lowercase__ : Any = down_block(SCREAMING_SNAKE_CASE ) # middle lowercase__ : List[str] = self.mid_block(SCREAMING_SNAKE_CASE ) # post-process lowercase__ : Union[str, Any] = self.conv_norm_out(SCREAMING_SNAKE_CASE ) lowercase__ : List[Any] = self.conv_act(SCREAMING_SNAKE_CASE ) lowercase__ : Any = self.conv_out(SCREAMING_SNAKE_CASE ) return sample class snake_case__(nn.Module ): 
"""simple docstring""" def __init__( self : Dict , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : Optional[int]=("UpDecoderBlock2D",) , SCREAMING_SNAKE_CASE : int=(64,) , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : int=32 , SCREAMING_SNAKE_CASE : str="silu" , SCREAMING_SNAKE_CASE : Any="group" , ): super().__init__() lowercase__ : List[str] = layers_per_block lowercase__ : int = nn.Convad( SCREAMING_SNAKE_CASE , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , ) lowercase__ : Optional[Any] = None lowercase__ : Dict = nn.ModuleList([] ) lowercase__ : List[str] = in_channels if norm_type == "spatial" else None # mid lowercase__ : str = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , ) # up lowercase__ : Tuple = list(reversed(SCREAMING_SNAKE_CASE ) ) lowercase__ : Dict = reversed_block_out_channels[0] for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE ): lowercase__ : Tuple = output_channel lowercase__ : List[Any] = reversed_block_out_channels[i] lowercase__ : List[Any] = i == len(SCREAMING_SNAKE_CASE ) - 1 lowercase__ : Dict = get_up_block( SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , prev_output_channel=SCREAMING_SNAKE_CASE , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , resnet_groups=SCREAMING_SNAKE_CASE , attention_head_dim=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , resnet_time_scale_shift=SCREAMING_SNAKE_CASE , ) self.up_blocks.append(SCREAMING_SNAKE_CASE ) lowercase__ : Optional[Any] = output_channel # out if norm_type == "spatial": lowercase__ : Any = 
SpatialNorm(block_out_channels[0] , SCREAMING_SNAKE_CASE ) else: lowercase__ : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=SCREAMING_SNAKE_CASE , eps=1E-6 ) lowercase__ : Union[str, Any] = nn.SiLU() lowercase__ : Any = nn.Convad(block_out_channels[0] , SCREAMING_SNAKE_CASE , 3 , padding=1 ) lowercase__ : List[Any] = False def snake_case ( self : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str=None ): lowercase__ : Tuple = z lowercase__ : List[str] = self.conv_in(SCREAMING_SNAKE_CASE ) lowercase__ : List[Any] = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(SCREAMING_SNAKE_CASE : List[str] ): def custom_forward(*SCREAMING_SNAKE_CASE : Optional[int] ): return module(*SCREAMING_SNAKE_CASE ) return custom_forward if is_torch_version(">=" , "1.11.0" ): # middle lowercase__ : List[str] = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE ) lowercase__ : str = sample.to(SCREAMING_SNAKE_CASE ) # up for up_block in self.up_blocks: lowercase__ : List[Any] = torch.utils.checkpoint.checkpoint( create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE ) else: # middle lowercase__ : str = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowercase__ : Tuple = sample.to(SCREAMING_SNAKE_CASE ) # up for up_block in self.up_blocks: lowercase__ : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else: # middle lowercase__ : Optional[int] = self.mid_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowercase__ : Optional[Any] = sample.to(SCREAMING_SNAKE_CASE ) # up for up_block in self.up_blocks: lowercase__ : Optional[Any] = 
up_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # post-process if latent_embeds is None: lowercase__ : Union[str, Any] = self.conv_norm_out(SCREAMING_SNAKE_CASE ) else: lowercase__ : Dict = self.conv_norm_out(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowercase__ : Union[str, Any] = self.conv_act(SCREAMING_SNAKE_CASE ) lowercase__ : Tuple = self.conv_out(SCREAMING_SNAKE_CASE ) return sample class snake_case__(nn.Module ): """simple docstring""" def __init__( self : Any , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : List[Any]="random" , SCREAMING_SNAKE_CASE : Union[str, Any]=False , SCREAMING_SNAKE_CASE : int=True ): super().__init__() lowercase__ : List[Any] = n_e lowercase__ : List[str] = vq_embed_dim lowercase__ : Optional[Any] = beta lowercase__ : List[str] = legacy lowercase__ : Tuple = nn.Embedding(self.n_e , self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e ) lowercase__ : Union[str, Any] = remap if self.remap is not None: self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) ) lowercase__ : Tuple = self.used.shape[0] lowercase__ : Any = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": lowercase__ : Any = self.re_embed lowercase__ : Tuple = self.re_embed + 1 print( f"""Remapping {self.n_e} indices to {self.re_embed} indices. 
""" f"""Using {self.unknown_index} for unknown indices.""" ) else: lowercase__ : str = n_e lowercase__ : Union[str, Any] = sane_index_shape def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict ): lowercase__ : Any = inds.shape assert len(SCREAMING_SNAKE_CASE ) > 1 lowercase__ : List[str] = inds.reshape(ishape[0] , -1 ) lowercase__ : str = self.used.to(SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = (inds[:, :, None] == used[None, None, ...]).long() lowercase__ : Dict = match.argmax(-1 ) lowercase__ : Dict = match.sum(2 ) < 1 if self.unknown_index == "random": lowercase__ : Optional[Any] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device ) else: lowercase__ : List[Any] = self.unknown_index return new.reshape(SCREAMING_SNAKE_CASE ) def snake_case ( self : int , SCREAMING_SNAKE_CASE : int ): lowercase__ : List[Any] = inds.shape assert len(SCREAMING_SNAKE_CASE ) > 1 lowercase__ : Optional[int] = inds.reshape(ishape[0] , -1 ) lowercase__ : str = self.used.to(SCREAMING_SNAKE_CASE ) if self.re_embed > self.used.shape[0]: # extra token lowercase__ : int = 0 # simply set to zero lowercase__ : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , SCREAMING_SNAKE_CASE ) return back.reshape(SCREAMING_SNAKE_CASE ) def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : List[Any] ): # reshape z -> (batch, height, width, channel) and flatten lowercase__ : Union[str, Any] = z.permute(0 , 2 , 3 , 1 ).contiguous() lowercase__ : Optional[Any] = z.view(-1 , self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z lowercase__ : Optional[Any] = torch.argmin(torch.cdist(SCREAMING_SNAKE_CASE , self.embedding.weight ) , dim=1 ) lowercase__ : List[str] = self.embedding(SCREAMING_SNAKE_CASE ).view(z.shape ) lowercase__ : Dict = None lowercase__ : int = None # compute loss for embedding if not self.legacy: lowercase__ : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + 
torch.mean((z_q - z.detach()) ** 2 ) else: lowercase__ : List[str] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients lowercase__ : Union[str, Any] = z + (z_q - z).detach() # reshape back to match original input shape lowercase__ : Optional[int] = z_q.permute(0 , 3 , 1 , 2 ).contiguous() if self.remap is not None: lowercase__ : Dict = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis lowercase__ : int = self.remap_to_used(SCREAMING_SNAKE_CASE ) lowercase__ : List[str] = min_encoding_indices.reshape(-1 , 1 ) # flatten if self.sane_index_shape: lowercase__ : List[str] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] ): # shape specifying (batch, height, width, channel) if self.remap is not None: lowercase__ : Union[str, Any] = indices.reshape(shape[0] , -1 ) # add batch axis lowercase__ : Union[str, Any] = self.unmap_to_all(SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = indices.reshape(-1 ) # flatten again # get quantized latent vectors lowercase__ : List[Any] = self.embedding(SCREAMING_SNAKE_CASE ) if shape is not None: lowercase__ : Any = z_q.view(SCREAMING_SNAKE_CASE ) # reshape back to match original input shape lowercase__ : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous() return z_q class snake_case__(_UpperCamelCase ): """simple docstring""" def __init__( self : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : str=False ): lowercase__ : Dict = parameters lowercase__ , lowercase__ : Optional[int] = torch.chunk(SCREAMING_SNAKE_CASE , 2 , dim=1 ) lowercase__ : Optional[Any] = torch.clamp(self.logvar , -30.0 , 20.0 ) lowercase__ : Optional[int] = deterministic lowercase__ : Tuple = torch.exp(0.5 * self.logvar ) lowercase__ : Optional[int] = torch.exp(self.logvar ) if 
self.deterministic: lowercase__ : Any = torch.zeros_like( self.mean , device=self.parameters.device , dtype=self.parameters.dtype ) def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None ): # make sure sample is on the same device as the parameters and has same dtype lowercase__ : Tuple = randn_tensor( self.mean.shape , generator=SCREAMING_SNAKE_CASE , device=self.parameters.device , dtype=self.parameters.dtype ) lowercase__ : str = self.mean + self.std * sample return x def snake_case ( self : str , SCREAMING_SNAKE_CASE : List[str]=None ): if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , ) def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict=[1, 2, 3] ): if self.deterministic: return torch.Tensor([0.0] ) lowercase__ : Any = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=SCREAMING_SNAKE_CASE ) def snake_case ( self : Tuple ): return self.mean
81
0
from __future__ import annotations from collections import deque from collections.abc import Sequence from dataclasses import dataclass from typing import Any @dataclass class snake_case__: """simple docstring""" lowercase_ = 42 lowercase_ = None lowercase_ = None def __lowerCamelCase ( ): """simple docstring""" lowercase__ : Optional[Any] = Node(1 ) lowercase__ : Union[str, Any] = Node(2 ) lowercase__ : Optional[int] = Node(3 ) lowercase__ : Optional[int] = Node(4 ) lowercase__ : List[Any] = Node(5 ) return tree def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" return [root.data, *preorder(root.left ), *preorder(root.right )] if root else [] def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" return postorder(root.left ) + postorder(root.right ) + [root.data] if root else [] def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" return [*inorder(root.left ), root.data, *inorder(root.right )] if root else [] def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0 def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" lowercase__ : list[Any] = [] if root is None: return output lowercase__ : int = deque([root] ) while process_queue: lowercase__ : int = process_queue.popleft() output.append(node.data ) if node.left: process_queue.append(node.left ) if node.right: process_queue.append(node.right ) return output def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowercase__ : list[Any] = [] def populate_output(lowerCamelCase__ , lowerCamelCase__ ) -> None: if not root: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.left , level - 1 ) populate_output(root.right , level - 1 ) populate_output(lowerCamelCase__ , lowerCamelCase__ ) return output def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowercase__ : list[Any] = [] def 
populate_output(lowerCamelCase__ , lowerCamelCase__ ) -> None: if root is None: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.right , level - 1 ) populate_output(root.left , level - 1 ) populate_output(lowerCamelCase__ , lowerCamelCase__ ) return output def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" if root is None: return [] lowercase__ : list[Sequence[Node | None]] = [] lowercase__ : str = 0 lowercase__ : Tuple = height(lowerCamelCase__ ) for h in range(1 , height_tree + 1 ): if not flag: output.append(get_nodes_from_left_to_right(lowerCamelCase__ , lowerCamelCase__ ) ) lowercase__ : Union[str, Any] = 1 else: output.append(get_nodes_from_right_to_left(lowerCamelCase__ , lowerCamelCase__ ) ) lowercase__ : Optional[int] = 0 return output def __lowerCamelCase ( ): # Main function for testing. """simple docstring""" lowercase__ : int = make_tree() print(F"""In-order Traversal: {inorder(lowerCamelCase__ )}""" ) print(F"""Pre-order Traversal: {preorder(lowerCamelCase__ )}""" ) print(F"""Post-order Traversal: {postorder(lowerCamelCase__ )}""" , "\n" ) print(F"""Height of Tree: {height(lowerCamelCase__ )}""" , "\n" ) print("Complete Level Order Traversal: " ) print(level_order(lowerCamelCase__ ) , "\n" ) print("Level-wise order Traversal: " ) for level in range(1 , height(lowerCamelCase__ ) + 1 ): print(F"""Level {level}:""" , get_nodes_from_left_to_right(lowerCamelCase__ , level=lowerCamelCase__ ) ) print("\nZigZag order Traversal: " ) print(zigzag(lowerCamelCase__ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
710
import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class snake_case__(_UpperCamelCase , unittest.TestCase ): """simple docstring""" lowercase_ = DiTPipeline lowercase_ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS lowercase_ = PipelineTesterMixin.required_optional_params - { """latents""", """num_images_per_prompt""", """callback""", """callback_steps""", } lowercase_ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS lowercase_ = False def snake_case ( self : int ): torch.manual_seed(0 ) lowercase__ : Optional[Any] = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=SCREAMING_SNAKE_CASE , activation_fn="gelu-approximate" , num_embeds_ada_norm=1_000 , norm_type="ada_norm_zero" , norm_elementwise_affine=SCREAMING_SNAKE_CASE , ) lowercase__ : Dict = AutoencoderKL() lowercase__ : Any = DDIMScheduler() lowercase__ : int = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler} return components def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int=0 ): if str(SCREAMING_SNAKE_CASE ).startswith("mps" ): lowercase__ : Union[str, Any] = torch.manual_seed(SCREAMING_SNAKE_CASE ) else: lowercase__ : List[str] = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE ) lowercase__ : int = { "class_labels": [1], "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs 
def snake_case ( self : Any ): lowercase__ : List[Any] = "cpu" lowercase__ : str = self.get_dummy_components() lowercase__ : str = self.pipeline_class(**SCREAMING_SNAKE_CASE ) pipe.to(SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE ) lowercase__ : Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE ) lowercase__ : str = pipe(**SCREAMING_SNAKE_CASE ).images lowercase__ : Tuple = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) lowercase__ : Tuple = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] ) lowercase__ : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(SCREAMING_SNAKE_CASE , 1E-3 ) def snake_case ( self : str ): self._test_inference_batch_single_identical(relax_max_difference=SCREAMING_SNAKE_CASE , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def snake_case ( self : Tuple ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class snake_case__(unittest.TestCase ): """simple docstring""" def snake_case ( self : int ): super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case ( self : str ): lowercase__ : List[Any] = torch.manual_seed(0 ) lowercase__ : Dict = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" ) pipe.to("cuda" ) lowercase__ : Tuple = ["vase", "umbrella", "white shark", "white wolf"] lowercase__ : Optional[Any] = pipe.get_label_ids(SCREAMING_SNAKE_CASE ) lowercase__ : Dict = pipe(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type="np" ).images for word, image in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): lowercase__ : Optional[Any] = load_numpy( f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" ) 
assert np.abs((expected_image - image).max() ) < 1E-2 def snake_case ( self : Union[str, Any] ): lowercase__ : int = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" ) lowercase__ : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("cuda" ) lowercase__ : Dict = ["vase", "umbrella"] lowercase__ : Any = pipe.get_label_ids(SCREAMING_SNAKE_CASE ) lowercase__ : List[str] = torch.manual_seed(0 ) lowercase__ : str = pipe(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type="np" ).images for word, image in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): lowercase__ : Optional[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" f"""/dit/{word}_512.npy""" ) assert np.abs((expected_image - image).max() ) < 1E-1
81
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) lowerCAmelCase__ = { 'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'], 'processing_trocr': ['TrOCRProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ 'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST', 'TrOCRForCausalLM', 'TrOCRPreTrainedModel', ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
711
import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class snake_case__(_UpperCamelCase ): """simple docstring""" lowercase_ = (CMStochasticIterativeScheduler,) lowercase_ = 1_0 def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Any ): lowercase__ : Any = { "num_train_timesteps": 201, "sigma_min": 0.002, "sigma_max": 80.0, } config.update(**SCREAMING_SNAKE_CASE ) return config def snake_case ( self : Optional[int] ): lowercase__ : Tuple = 10 lowercase__ : List[Any] = self.get_scheduler_config() lowercase__ : Optional[Any] = self.scheduler_classes[0](**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) lowercase__ : Any = scheduler.timesteps[0] lowercase__ : Optional[int] = scheduler.timesteps[1] lowercase__ : List[Any] = self.dummy_sample lowercase__ : Tuple = 0.1 * sample lowercase__ : Tuple = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample lowercase__ : Any = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def snake_case ( self : Dict ): for timesteps in [10, 50, 100, 1_000]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE ) def snake_case ( self : str ): for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=SCREAMING_SNAKE_CASE ) def snake_case ( self : str ): lowercase__ : Any = self.scheduler_classes[0] lowercase__ : List[Any] = self.get_scheduler_config() lowercase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE ) lowercase__ : Any = 1 scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) lowercase__ : List[Any] = scheduler.timesteps lowercase__ : Optional[int] = torch.manual_seed(0 ) lowercase__ : List[str] = self.dummy_model() lowercase__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(SCREAMING_SNAKE_CASE ): # 1. 
scale model input lowercase__ : Tuple = scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # 2. predict noise residual lowercase__ : Dict = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # 3. predict previous sample x_t-1 lowercase__ : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample lowercase__ : Dict = pred_prev_sample lowercase__ : List[Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) ) lowercase__ : Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 192.7_614 ) < 1E-2 assert abs(result_mean.item() - 0.2_510 ) < 1E-3 def snake_case ( self : Union[str, Any] ): lowercase__ : Optional[int] = self.scheduler_classes[0] lowercase__ : Tuple = self.get_scheduler_config() lowercase__ : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = [106, 0] scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = scheduler.timesteps lowercase__ : Optional[int] = torch.manual_seed(0 ) lowercase__ : Optional[int] = self.dummy_model() lowercase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input lowercase__ : Optional[Any] = scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # 2. predict noise residual lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # 3. 
predict previous sample x_t-1 lowercase__ : Tuple = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample lowercase__ : Union[str, Any] = pred_prev_sample lowercase__ : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) ) lowercase__ : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 347.6_357 ) < 1E-2 assert abs(result_mean.item() - 0.4_527 ) < 1E-3 def snake_case ( self : Optional[int] ): lowercase__ : Union[str, Any] = self.scheduler_classes[0] lowercase__ : str = self.get_scheduler_config() lowercase__ : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE ) lowercase__ : int = [39, 30, 12, 15, 0] with self.assertRaises(SCREAMING_SNAKE_CASE , msg="`timesteps` must be in descending order." ): scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE ) def snake_case ( self : Union[str, Any] ): lowercase__ : List[str] = self.scheduler_classes[0] lowercase__ : Dict = self.get_scheduler_config() lowercase__ : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE ) lowercase__ : Union[str, Any] = [39, 30, 12, 1, 0] lowercase__ : Tuple = len(SCREAMING_SNAKE_CASE ) with self.assertRaises(SCREAMING_SNAKE_CASE , msg="Can only pass one of `num_inference_steps` or `timesteps`." ): scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE ) def snake_case ( self : Optional[Any] ): lowercase__ : List[str] = self.scheduler_classes[0] lowercase__ : List[Any] = self.get_scheduler_config() lowercase__ : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE ) lowercase__ : Tuple = [scheduler.config.num_train_timesteps] with self.assertRaises( SCREAMING_SNAKE_CASE , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ): scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE )
81
0
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class snake_case__(unittest.TestCase ): """simple docstring""" @slow def snake_case ( self : List[str] ): lowercase__ : Union[str, Any] = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" ) lowercase__ : Any = tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" lowercase__ : Optional[int] = model(_a )["""last_hidden_state"""] lowercase__ : List[str] = tf.TensorShape((1, 10, 768) ) self.assertEqual(output.shape , _a ) # compare the actual values for a slice. lowercase__ : Optional[int] = tf.convert_to_tensor( [[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
712
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import ( CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common, get_velocity_common, ) @flax.struct.dataclass class snake_case__: """simple docstring""" lowercase_ = 42 # setable values lowercase_ = 42 lowercase_ = 42 lowercase_ = None @classmethod def snake_case ( cls : Union[str, Any] , SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ): return cls(common=SCREAMING_SNAKE_CASE , init_noise_sigma=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE ) @dataclass class snake_case__(_UpperCamelCase ): """simple docstring""" lowercase_ = 42 class snake_case__(_UpperCamelCase , _UpperCamelCase ): """simple docstring""" lowercase_ = [e.name for e in FlaxKarrasDiffusionSchedulers] lowercase_ = 42 @property def snake_case ( self : Dict ): return True @register_to_config def __init__( self : Dict , SCREAMING_SNAKE_CASE : int = 1_000 , SCREAMING_SNAKE_CASE : float = 0.0_001 , SCREAMING_SNAKE_CASE : float = 0.02 , SCREAMING_SNAKE_CASE : str = "linear" , SCREAMING_SNAKE_CASE : Optional[jnp.ndarray] = None , SCREAMING_SNAKE_CASE : str = "fixed_small" , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : str = "epsilon" , SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa , ): lowercase__ : List[Any] = dtype def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : Optional[CommonSchedulerState] = None ): if common is None: lowercase__ : Dict = CommonSchedulerState.create(self ) # standard deviation of the initial noise distribution lowercase__ : Dict = jnp.array(1.0 , dtype=self.dtype ) lowercase__ : Dict = jnp.arange(0 , self.config.num_train_timesteps 
).round()[::-1] return DDPMSchedulerState.create( common=SCREAMING_SNAKE_CASE , init_noise_sigma=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE , ) def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Optional[int] = None ): return sample def snake_case ( self : int , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple = () ): lowercase__ : Any = self.config.num_train_timesteps // num_inference_steps # creates integer timesteps by multiplying by ratio # rounding to avoid issues when num_inference_step is power of 3 lowercase__ : Union[str, Any] = (jnp.arange(0 , SCREAMING_SNAKE_CASE ) * step_ratio).round()[::-1] return state.replace( num_inference_steps=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE , ) def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : List[Any]=None ): lowercase__ : Tuple = state.common.alphas_cumprod[t] lowercase__ : Any = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample lowercase__ : str = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t] if variance_type is None: lowercase__ : Dict = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small": lowercase__ : Union[str, Any] = jnp.clip(SCREAMING_SNAKE_CASE , a_min=1E-2_0 ) # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": lowercase__ : Optional[int] = jnp.log(jnp.clip(SCREAMING_SNAKE_CASE , a_min=1E-2_0 ) ) elif variance_type == "fixed_large": 
lowercase__ : Union[str, Any] = state.common.betas[t] elif variance_type == "fixed_large_log": # Glide max_log lowercase__ : List[Any] = jnp.log(state.common.betas[t] ) elif variance_type == "learned": return predicted_variance elif variance_type == "learned_range": lowercase__ : List[Any] = variance lowercase__ : Union[str, Any] = state.common.betas[t] lowercase__ : Tuple = (predicted_variance + 1) / 2 lowercase__ : Optional[Any] = frac * max_log + (1 - frac) * min_log return variance def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Optional[jax.random.KeyArray] = None , SCREAMING_SNAKE_CASE : bool = True , ): lowercase__ : Tuple = timestep if key is None: lowercase__ : Union[str, Any] = jax.random.PRNGKey(0 ) if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]: lowercase__ , lowercase__ : str = jnp.split(SCREAMING_SNAKE_CASE , sample.shape[1] , axis=1 ) else: lowercase__ : Any = None # 1. compute alphas, betas lowercase__ : Dict = state.common.alphas_cumprod[t] lowercase__ : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) lowercase__ : Optional[Any] = 1 - alpha_prod_t lowercase__ : Optional[int] = 1 - alpha_prod_t_prev # 2. 
compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": lowercase__ : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": lowercase__ : Optional[Any] = model_output elif self.config.prediction_type == "v_prediction": lowercase__ : Optional[Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output else: raise ValueError( f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """ " for the FlaxDDPMScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: lowercase__ : List[Any] = jnp.clip(SCREAMING_SNAKE_CASE , -1 , 1 ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf lowercase__ : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t lowercase__ : str = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf lowercase__ : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise def random_variance(): lowercase__ : Any = jax.random.split(SCREAMING_SNAKE_CASE , num=1 ) lowercase__ : Any = jax.random.normal(SCREAMING_SNAKE_CASE , shape=model_output.shape , dtype=self.dtype ) return (self._get_variance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , predicted_variance=SCREAMING_SNAKE_CASE ) ** 0.5) * noise lowercase__ : Optional[Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) ) lowercase__ : Optional[int] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample, state) return FlaxDDPMSchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE , state=SCREAMING_SNAKE_CASE ) def snake_case ( self : Any , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , ): return add_noise_common(state.common , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , ): return get_velocity_common(state.common , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __len__( self : Tuple ): return self.config.num_train_timesteps
81
0
'''simple docstring''' import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name class snake_case__(_snake_case ): """simple docstring""" def __init__( self : str , SCREAMING_SNAKE_CASE : AutoencoderKL , SCREAMING_SNAKE_CASE : CLIPTextModel , SCREAMING_SNAKE_CASE : CLIPTokenizer , SCREAMING_SNAKE_CASE : UNetaDConditionModel , SCREAMING_SNAKE_CASE : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , SCREAMING_SNAKE_CASE : StableDiffusionSafetyChecker , SCREAMING_SNAKE_CASE : CLIPImageProcessor , ): super().__init__() self.register_modules( vae=SCREAMING_SNAKE_CASE , text_encoder=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE , safety_checker=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE , ) def snake_case ( self : Any , SCREAMING_SNAKE_CASE : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory lowercase__ : int = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(SCREAMING_SNAKE_CASE ) def snake_case ( self : Dict ): self.enable_attention_slicing(SCREAMING_SNAKE_CASE ) @torch.no_grad() def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, List[str]] , SCREAMING_SNAKE_CASE : int = 512 , SCREAMING_SNAKE_CASE : int = 512 , SCREAMING_SNAKE_CASE : int = 50 , SCREAMING_SNAKE_CASE : float = 
7.5 , SCREAMING_SNAKE_CASE : Optional[Union[str, List[str]]] = None , SCREAMING_SNAKE_CASE : Optional[int] = 1 , SCREAMING_SNAKE_CASE : float = 0.0 , SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None , SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE : Optional[str] = "pil" , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , **SCREAMING_SNAKE_CASE : Any , ): if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): lowercase__ : Union[str, Any] = 1 elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): lowercase__ : Dict = len(SCREAMING_SNAKE_CASE ) else: raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE )}""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or callback_steps <= 0) ): raise ValueError( f"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" f""" {type(SCREAMING_SNAKE_CASE )}.""" ) # get prompt text embeddings lowercase__ : Dict = self.tokenizer( SCREAMING_SNAKE_CASE , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , ) lowercase__ : Optional[Any] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: lowercase__ : Dict = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" ) lowercase__ : List[Any] = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings 
is None: lowercase__ : Optional[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method lowercase__ , lowercase__ , lowercase__ : str = text_embeddings.shape lowercase__ : int = text_embeddings.repeat(1 , SCREAMING_SNAKE_CASE , 1 ) lowercase__ : Optional[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , SCREAMING_SNAKE_CASE , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. lowercase__ : Optional[Any] = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: lowercase__ : int = 42 if negative_prompt is None: lowercase__ : Optional[Any] = [""] elif type(SCREAMING_SNAKE_CASE ) is not type(SCREAMING_SNAKE_CASE ): raise TypeError( f"""`negative_prompt` should be the same type to `prompt`, but got {type(SCREAMING_SNAKE_CASE )} !=""" f""" {type(SCREAMING_SNAKE_CASE )}.""" ) elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): lowercase__ : Optional[Any] = [negative_prompt] elif batch_size != len(SCREAMING_SNAKE_CASE ): raise ValueError( f"""`negative_prompt`: {negative_prompt} has batch size {len(SCREAMING_SNAKE_CASE )}, but `prompt`:""" f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches""" " the batch size of `prompt`." 
) else: lowercase__ : int = negative_prompt lowercase__ : str = text_input_ids.shape[-1] lowercase__ : Optional[int] = self.tokenizer( SCREAMING_SNAKE_CASE , padding="max_length" , max_length=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , return_tensors="pt" , ) lowercase__ : Union[str, Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method lowercase__ : List[str] = uncond_embeddings.shape[1] lowercase__ : Dict = uncond_embeddings.repeat(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 1 ) lowercase__ : int = uncond_embeddings.view(batch_size * num_images_per_prompt , SCREAMING_SNAKE_CASE , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes lowercase__ : List[str] = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
lowercase__ : int = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) lowercase__ : Dict = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) lowercase__ : Any = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps lowercase__ : List[str] = torch.randn( SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , device="cpu" , dtype=SCREAMING_SNAKE_CASE ).to(self.device ) lowercase__ : Optional[int] = torch.randn(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , device="cpu" , dtype=SCREAMING_SNAKE_CASE ).to( self.device ) else: lowercase__ : Any = torch.randn( SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , device=self.device , dtype=SCREAMING_SNAKE_CASE ) lowercase__ : Tuple = torch.randn(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , device=self.device , dtype=SCREAMING_SNAKE_CASE ) else: if latents_reference.shape != latents_shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) lowercase__ : List[Any] = latents_reference.to(self.device ) lowercase__ : str = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images lowercase__ : str = (latents_shape[3] - latents_shape_reference[3]) // 2 lowercase__ : Any = (latents_shape[2] - latents_shape_reference[2]) // 2 lowercase__ : Optional[int] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx lowercase__ : Optional[Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy lowercase__ : List[Any] = 0 if dx < 0 else dx lowercase__ : List[str] = 0 if dy < 0 else dy lowercase__ : int = max(-dx , 0 ) lowercase__ : str = max(-dy , 0 ) # import pdb # pdb.set_trace() lowercase__ : int = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps 
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand lowercase__ : Tuple = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler lowercase__ : Optional[Any] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] lowercase__ : Union[str, Any] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) lowercase__ : Tuple = {} if accepts_eta: lowercase__ : Optional[Any] = eta for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE ) ): # expand the latents if we are doing classifier free guidance lowercase__ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents lowercase__ : Union[str, Any] = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # predict the noise residual lowercase__ : Any = self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE ).sample # perform guidance if do_classifier_free_guidance: lowercase__ , lowercase__ : str = noise_pred.chunk(2 ) lowercase__ : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 lowercase__ : Optional[int] = self.scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowercase__ : Dict = 1 / 0.18_215 * latents lowercase__ : Dict = 
self.vae.decode(SCREAMING_SNAKE_CASE ).sample lowercase__ : Tuple = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 lowercase__ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: lowercase__ : Tuple = self.feature_extractor(self.numpy_to_pil(SCREAMING_SNAKE_CASE ) , return_tensors="pt" ).to( self.device ) lowercase__ , lowercase__ : Optional[int] = self.safety_checker( images=SCREAMING_SNAKE_CASE , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: lowercase__ : str = None if output_type == "pil": lowercase__ : Any = self.numpy_to_pil(SCREAMING_SNAKE_CASE ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE , nsfw_content_detected=SCREAMING_SNAKE_CASE )
713
from typing import Callable, List, Optional, Union import PIL import torch from transformers import ( CLIPImageProcessor, CLIPSegForImageSegmentation, CLIPSegProcessor, CLIPTextModel, CLIPTokenizer, ) from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import deprecate, is_accelerate_available, logging lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name class snake_case__(_UpperCamelCase ): """simple docstring""" def __init__( self : Any , SCREAMING_SNAKE_CASE : CLIPSegForImageSegmentation , SCREAMING_SNAKE_CASE : CLIPSegProcessor , SCREAMING_SNAKE_CASE : AutoencoderKL , SCREAMING_SNAKE_CASE : CLIPTextModel , SCREAMING_SNAKE_CASE : CLIPTokenizer , SCREAMING_SNAKE_CASE : UNetaDConditionModel , SCREAMING_SNAKE_CASE : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , SCREAMING_SNAKE_CASE : StableDiffusionSafetyChecker , SCREAMING_SNAKE_CASE : CLIPImageProcessor , ): super().__init__() if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1: lowercase__ : Optional[Any] = ( f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`""" f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """ "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1" , "1.0.0" , SCREAMING_SNAKE_CASE , standard_warn=SCREAMING_SNAKE_CASE ) lowercase__ : int = dict(scheduler.config ) lowercase__ : Any = 1 lowercase__ : Union[str, Any] = FrozenDict(SCREAMING_SNAKE_CASE ) if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False: lowercase__ : Optional[Any] = ( f"""The configuration file of this scheduler: {scheduler} has not set the configuration""" " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make" " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to" " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face" " Hub, it would be very nice if you could open a Pull request for the" " `scheduler/scheduler_config.json` file" ) deprecate("skip_prk_steps not set" , "1.0.0" , SCREAMING_SNAKE_CASE , standard_warn=SCREAMING_SNAKE_CASE ) lowercase__ : Tuple = dict(scheduler.config ) lowercase__ : Union[str, Any] = True lowercase__ : int = FrozenDict(SCREAMING_SNAKE_CASE ) if safety_checker is None: logger.warning( f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure""" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
) self.register_modules( segmentation_model=SCREAMING_SNAKE_CASE , segmentation_processor=SCREAMING_SNAKE_CASE , vae=SCREAMING_SNAKE_CASE , text_encoder=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE , safety_checker=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE , ) def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory lowercase__ : List[str] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(SCREAMING_SNAKE_CASE ) def snake_case ( self : List[Any] ): self.enable_attention_slicing(SCREAMING_SNAKE_CASE ) def snake_case ( self : Optional[Any] ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) lowercase__ : Union[str, Any] = torch.device("cuda" ) for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]: if cpu_offloaded_model is not None: cpu_offload(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def snake_case ( self : Optional[Any] ): if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(SCREAMING_SNAKE_CASE , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, List[str]] , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, PIL.Image.Image] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int = 512 , SCREAMING_SNAKE_CASE : int = 512 , 
SCREAMING_SNAKE_CASE : int = 50 , SCREAMING_SNAKE_CASE : float = 7.5 , SCREAMING_SNAKE_CASE : Optional[Union[str, List[str]]] = None , SCREAMING_SNAKE_CASE : Optional[int] = 1 , SCREAMING_SNAKE_CASE : float = 0.0 , SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None , SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE : Optional[str] = "pil" , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , SCREAMING_SNAKE_CASE : int = 1 , **SCREAMING_SNAKE_CASE : Optional[Any] , ): lowercase__ : Dict = self.segmentation_processor( text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device ) lowercase__ : int = self.segmentation_model(**SCREAMING_SNAKE_CASE ) lowercase__ : int = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy() lowercase__ : List[str] = self.numpy_to_pil(SCREAMING_SNAKE_CASE )[0].resize(image.size ) # Run inpainting pipeline with the generated mask lowercase__ : int = StableDiffusionInpaintPipeline( vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , ) return inpainting_pipeline( prompt=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , mask_image=SCREAMING_SNAKE_CASE , height=SCREAMING_SNAKE_CASE , width=SCREAMING_SNAKE_CASE , num_inference_steps=SCREAMING_SNAKE_CASE , guidance_scale=SCREAMING_SNAKE_CASE , negative_prompt=SCREAMING_SNAKE_CASE , num_images_per_prompt=SCREAMING_SNAKE_CASE , eta=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , latents=SCREAMING_SNAKE_CASE , output_type=SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE , callback=SCREAMING_SNAKE_CASE , callback_steps=SCREAMING_SNAKE_CASE , )
81
0
import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class snake_case__(_UpperCamelCase ): """simple docstring""" lowercase_ = 0 lowercase_ = False lowercase_ = 3.0 class snake_case__(unittest.TestCase ): """simple docstring""" def snake_case ( self : Union[str, Any] ): self.assertDictEqual(MockClass().to_kwargs() , {} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"a": 2} ) self.assertDictEqual(MockClass(a=2 , b=_UpperCAmelCase ).to_kwargs() , {"a": 2, "b": True} ) self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"a": 2, "c": 2.25} ) @require_cuda def snake_case ( self : str ): lowercase__ : List[str] = GradScalerKwargs(init_scale=1_024 , growth_factor=2 ) AcceleratorState._reset_state() lowercase__ : Any = Accelerator(mixed_precision="fp16" , kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) lowercase__ : str = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale , 1_024.0 ) self.assertEqual(scaler._growth_factor , 2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor , 0.5 ) self.assertEqual(scaler._growth_interval , 2_000 ) self.assertEqual(scaler._enabled , _UpperCAmelCase ) @require_multi_gpu def snake_case ( self : Union[str, Any] ): lowercase__ : Any = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )] execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() ) if __name__ == "__main__": lowerCAmelCase__ = DistributedDataParallelKwargs(bucket_cap_mb=1_5, find_unused_parameters=True) lowerCAmelCase__ = Accelerator(kwargs_handlers=[ddp_scaler]) lowerCAmelCase__ = torch.nn.Linear(1_0_0, 2_0_0) 
lowerCAmelCase__ = accelerator.prepare(model) # Check the values changed in kwargs lowerCAmelCase__ = '''''' lowerCAmelCase__ = model.bucket_bytes_cap // (1_0_2_4 * 1_0_2_4) if observed_bucket_cap_map != 1_5: error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
714
# Conversion script: original Microsoft FocalNet classification checkpoints ->
# Hugging Face Transformers FocalNetForImageClassification.
# NOTE(review): identifiers are machine-mangled. All three functions share the
# name `__lowerCamelCase` (later defs shadow earlier ones), the converter's
# three parameters share one name (`lowerCamelCase__`, a SyntaxError as
# written), and bodies reference un-mangled names (`model_name`, `name`,
# `state_dict`, `config`, ...) that the mangling removed. Restore original
# identifiers before running; code below is kept token-identical.
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms

from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


def __lowerCamelCase ( lowerCamelCase__ ):
    # Build a FocalNetConfig from the checkpoint name: depths, conv-embed flag,
    # focal levels/windows, embed dim, and the matching ImageNet label maps
    # (22k for large/huge, 1k otherwise).
    lowercase__ : Dict = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    lowercase__ : str = True if "large" in model_name or "huge" in model_name else False
    lowercase__ : Optional[Any] = True if "large" in model_name or "huge" in model_name else False
    lowercase__ : List[str] = True if "large" in model_name or "huge" in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            lowercase__ : int = [3, 3, 3, 3]
            lowercase__ : Tuple = [5, 5, 5, 5]
        elif "fl4" in model_name:
            lowercase__ : Optional[Any] = [4, 4, 4, 4]
            lowercase__ : Optional[Any] = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        lowercase__ : Union[str, Any] = [3, 3, 3, 3]
        if "lrf" in model_name:
            # large receptive field variants use one extra focal level
            lowercase__ : Union[str, Any] = [3, 3, 3, 3]
        else:
            lowercase__ : Tuple = [2, 2, 2, 2]
    if "tiny" in model_name:
        lowercase__ : Optional[Any] = 96
    elif "small" in model_name:
        lowercase__ : List[str] = 96
    elif "base" in model_name:
        lowercase__ : str = 128
    elif "large" in model_name:
        lowercase__ : Any = 192
    elif "xlarge" in model_name:
        lowercase__ : str = 256
    elif "huge" in model_name:
        lowercase__ : List[str] = 352
    # set label information
    lowercase__ : Tuple = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        lowercase__ : List[Any] = "imagenet-22k-id2label.json"
    else:
        lowercase__ : Optional[int] = "imagenet-1k-id2label.json"
    lowercase__ : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
    lowercase__ : Optional[int] = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
    lowercase__ : int = {v: k for k, v in idalabel.items()}
    lowercase__ : str = FocalNetConfig(
        embed_dim=lowerCamelCase__ , depths=lowerCamelCase__ , focal_levels=lowerCamelCase__ , focal_windows=lowerCamelCase__ , use_conv_embed=lowerCamelCase__ , idalabel=lowerCamelCase__ , labelaid=lowerCamelCase__ , use_post_layernorm=lowerCamelCase__ , use_layerscale=lowerCamelCase__ , )
    return config


def __lowerCamelCase ( lowerCamelCase__ ):
    # Map one original state-dict key to the HF naming scheme; anything that is
    # not the classification head is prefixed with "focalnet.".
    if "patch_embed.proj" in name:
        lowercase__ : int = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    if "patch_embed.norm" in name:
        lowercase__ : Dict = name.replace("patch_embed.norm" , "embeddings.norm" )
    if "layers" in name:
        lowercase__ : List[str] = "encoder." + name
    if "encoder.layers" in name:
        lowercase__ : Optional[Any] = name.replace("encoder.layers" , "encoder.stages" )
    if "downsample.proj" in name:
        lowercase__ : Optional[Any] = name.replace("downsample.proj" , "downsample.projection" )
    if "blocks" in name:
        lowercase__ : List[str] = name.replace("blocks" , "layers" )
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        lowercase__ : Any = name.replace("modulation.f" , "modulation.projection_in" )
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        lowercase__ : Optional[Any] = name.replace("modulation.h" , "modulation.projection_context" )
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        lowercase__ : Optional[Any] = name.replace("modulation.proj" , "modulation.projection_out" )
    if name == "norm.weight":
        lowercase__ : List[str] = "layernorm.weight"
    if name == "norm.bias":
        lowercase__ : List[Any] = "layernorm.bias"
    if "head" in name:
        lowercase__ : Optional[int] = name.replace("head" , "classifier" )
    else:
        lowercase__ : Union[str, Any] = "focalnet." + name
    return name


def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ):
    # Download the original checkpoint, rename all weights, load them into the
    # HF model, sanity-check logits on a COCO image, then optionally save the
    # converted model/processor locally and push them to the Hub.
    lowercase__ : List[Any] = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on
    lowercase__ : Union[str, Any] = model_name_to_url[model_name]
    print("Checkpoint URL: " , lowerCamelCase__ )
    lowercase__ : Optional[int] = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="cpu" )["model"]
    # rename keys
    for key in state_dict.copy().keys():
        lowercase__ : Tuple = state_dict.pop(lowerCamelCase__ )
        lowercase__ : List[str] = val
    lowercase__ : List[str] = get_focalnet_config(lowerCamelCase__ )
    lowercase__ : Union[str, Any] = FocalNetForImageClassification(lowerCamelCase__ )
    model.eval()
    # load state dict
    model.load_state_dict(lowerCamelCase__ )
    # verify conversion
    lowercase__ : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
    lowercase__ : int = BitImageProcessor(
        do_resize=lowerCamelCase__ , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase__ , crop_size=224 , do_normalize=lowerCamelCase__ , image_mean=lowerCamelCase__ , image_std=lowerCamelCase__ , )
    lowercase__ : Tuple = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
    lowercase__ : Tuple = processor(images=lowerCamelCase__ , return_tensors="pt" )
    # Reference torchvision pipeline used to cross-check the HF processor.
    lowercase__ : Any = transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    lowercase__ : int = image_transforms(lowerCamelCase__ ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , lowerCamelCase__ , atol=1e-4 )
    lowercase__ : List[Any] = model(**lowerCamelCase__ )
    lowercase__ : int = outputs.logits.argmax(-1 ).item()
    print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
    print("First values of logits:" , outputs.logits[0, :3] )
    # Expected logit slices per converted checkpoint (no slice exists for the
    # large/xlarge variants, so `expected_slice` would be undefined for them —
    # presumably verification was only done for tiny/small/base upstream).
    if model_name == "focalnet-tiny":
        lowercase__ : Union[str, Any] = torch.tensor([0.2166, -0.4368, 0.2191] )
    elif model_name == "focalnet-tiny-lrf":
        lowercase__ : Optional[int] = torch.tensor([1.1669, 0.0125, -0.1695] )
    elif model_name == "focalnet-small":
        lowercase__ : int = torch.tensor([0.4917, -0.0430, 0.1341] )
    elif model_name == "focalnet-small-lrf":
        lowercase__ : Tuple = torch.tensor([-0.2588, -0.5342, -0.2331] )
    elif model_name == "focalnet-base":
        lowercase__ : str = torch.tensor([-0.1655, -0.4090, -0.1730] )
    elif model_name == "focalnet-base-lrf":
        lowercase__ : Optional[Any] = torch.tensor([0.5306, -0.0483, -0.3928] )
    assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(lowerCamelCase__ )
        processor.save_pretrained(lowerCamelCase__ )
    if push_to_hub:
        print(F"""Pushing model and processor of {model_name} to the hub...""" )
        model.push_to_hub(F"""{model_name}""" )
        processor.push_to_hub(F"""{model_name}""" )


if __name__ == "__main__":
    lowerCAmelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''focalnet-tiny''',
        type=str,
        help='''Name of the FocalNet model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''',
        action='''store_true''',
        help='''Whether to push the model and processor to the hub.''',
    )
    lowerCAmelCase__ = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
81
0
# Fast (tokenizers-backed) DistilBERT tokenizer module.
# NOTE(review): identifiers are machine-mangled — all module constants are
# assigned to the same name `lowerCAmelCase__` (so the class-level references
# VOCAB_FILES_NAMES / PRETRAINED_* are unresolved as written), all `__init__`
# parameters share the name SCREAMING_SNAKE_CASE (a SyntaxError as written),
# and `_UpperCamelCase` stands in for the original argument names. Restore
# original identifiers before running; code below is kept token-identical.
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer


lowerCAmelCase__ = logging.get_logger(__name__)

lowerCAmelCase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

lowerCAmelCase__ = {
    "vocab_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-german-cased": (
            "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
        ),
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
        ),
    },
}

# Max model input sizes (positions) per pretrained checkpoint.
lowerCAmelCase__ = {
    "distilbert-base-uncased": 5_1_2,
    "distilbert-base-uncased-distilled-squad": 5_1_2,
    "distilbert-base-cased": 5_1_2,
    "distilbert-base-cased-distilled-squad": 5_1_2,
    "distilbert-base-german-cased": 5_1_2,
    "distilbert-base-multilingual-cased": 5_1_2,
}

# Per-checkpoint init overrides (lower-casing behaviour).
lowerCAmelCase__ = {
    "distilbert-base-uncased": {"do_lower_case": True},
    "distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
    "distilbert-base-cased": {"do_lower_case": False},
    "distilbert-base-cased-distilled-squad": {"do_lower_case": False},
    "distilbert-base-german-cased": {"do_lower_case": False},
    "distilbert-base-multilingual-cased": {"do_lower_case": False},
}


class snake_case__(__lowerCAmelCase ):
    """simple docstring"""
    lowercase_ = VOCAB_FILES_NAMES
    lowercase_ = PRETRAINED_VOCAB_FILES_MAP
    lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowercase_ = PRETRAINED_INIT_CONFIGURATION
    lowercase_ = ["""input_ids""", """attention_mask"""]
    lowercase_ = DistilBertTokenizer

    def __init__( self : Tuple , SCREAMING_SNAKE_CASE : int=None , SCREAMING_SNAKE_CASE : int=None , SCREAMING_SNAKE_CASE : Any=True , SCREAMING_SNAKE_CASE : Optional[Any]="[UNK]" , SCREAMING_SNAKE_CASE : int="[SEP]" , SCREAMING_SNAKE_CASE : Optional[int]="[PAD]" , SCREAMING_SNAKE_CASE : List[Any]="[CLS]" , SCREAMING_SNAKE_CASE : int="[MASK]" , SCREAMING_SNAKE_CASE : Tuple=True , SCREAMING_SNAKE_CASE : Optional[Any]=None , **SCREAMING_SNAKE_CASE : str , ):
        super().__init__(
            _UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , )
        # Rebuild the backend normalizer if the serialized tokenizer's settings
        # disagree with the requested lowercase / strip-accents / CJK options.
        lowercase__ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , _UpperCamelCase ) != do_lower_case
            or normalizer_state.get("strip_accents" , _UpperCamelCase ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , _UpperCamelCase ) != tokenize_chinese_chars
        ):
            lowercase__ : Dict = getattr(_UpperCamelCase , normalizer_state.pop("type" ) )
            lowercase__ : int = do_lower_case
            lowercase__ : Optional[Any] = strip_accents
            lowercase__ : Dict = tokenize_chinese_chars
            lowercase__ : Optional[int] = normalizer_class(**_UpperCamelCase )
        lowercase__ : List[str] = do_lower_case

    def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str=None ):
        # Build model inputs with special tokens:
        # [CLS] A [SEP] (and "B [SEP]" appended for a pair).
        lowercase__ : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None ):
        # Token type ids: all zeros for a single sequence; zeros for the first
        # segment and ones for the second when a pair is given.
        lowercase__ : Union[str, Any] = [self.sep_token_id]
        lowercase__ : Optional[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[str] = None ):
        # Save the backend vocabulary files; returns the written file paths.
        lowercase__ : Any = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
        return tuple(_UpperCamelCase )
715
# Configuration class for the Informer time-series transformer.
# NOTE(review): identifiers are machine-mangled — every `__init__` parameter
# shares the single name SCREAMING_SNAKE_CASE (a SyntaxError as written), all
# assignment targets are `lowercase__`, and the body references the original
# un-mangled parameter names (prediction_length, d_model, ...) that no longer
# exist. Restore original identifiers before running; code below is kept
# token-identical.
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCAmelCase__ = logging.get_logger(__name__)

lowerCAmelCase__ = {
    '''huggingface/informer-tourism-monthly''': (
        '''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class snake_case__(_UpperCamelCase ):
    """simple docstring"""
    lowercase_ = """informer"""
    # Maps the generic config attribute names onto Informer's own names.
    lowercase_ = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
        """num_hidden_layers""": """encoder_layers""",
    }

    def __init__( self : int , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : str = "student_t" , SCREAMING_SNAKE_CASE : str = "nll" , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : List[int] = None , SCREAMING_SNAKE_CASE : Optional[Union[str, bool]] = "mean" , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : int = 64 , SCREAMING_SNAKE_CASE : int = 32 , SCREAMING_SNAKE_CASE : int = 32 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : str = "gelu" , SCREAMING_SNAKE_CASE : float = 0.05 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : int = 100 , SCREAMING_SNAKE_CASE : float = 0.02 , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : str = "prob" , SCREAMING_SNAKE_CASE : int = 5 , SCREAMING_SNAKE_CASE : bool = True , **SCREAMING_SNAKE_CASE : List[Any] , ):
        # time series specific configuration
        lowercase__ : Any = prediction_length
        # context window defaults to the prediction horizon when not given
        lowercase__ : List[str] = context_length or prediction_length
        lowercase__ : Tuple = distribution_output
        lowercase__ : Union[str, Any] = loss
        lowercase__ : Union[str, Any] = input_size
        lowercase__ : List[str] = num_time_features
        lowercase__ : Optional[Any] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        lowercase__ : List[str] = scaling
        lowercase__ : str = num_dynamic_real_features
        lowercase__ : Tuple = num_static_real_features
        lowercase__ : List[str] = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(SCREAMING_SNAKE_CASE ) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`" )
            lowercase__ : Dict = cardinality
        else:
            lowercase__ : Dict = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(SCREAMING_SNAKE_CASE ) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
            lowercase__ : Union[str, Any] = embedding_dimension
        else:
            # heuristic default: min(50, (cardinality + 1) // 2) per feature
            lowercase__ : Optional[int] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        lowercase__ : Dict = num_parallel_samples
        # Transformer architecture configuration
        lowercase__ : Tuple = input_size * len(self.lags_sequence ) + self._number_of_features
        lowercase__ : Optional[Any] = d_model
        lowercase__ : int = encoder_attention_heads
        lowercase__ : Tuple = decoder_attention_heads
        lowercase__ : List[Any] = encoder_ffn_dim
        lowercase__ : List[str] = decoder_ffn_dim
        lowercase__ : List[str] = encoder_layers
        lowercase__ : Tuple = decoder_layers
        lowercase__ : Union[str, Any] = dropout
        lowercase__ : List[Any] = attention_dropout
        lowercase__ : str = activation_dropout
        lowercase__ : int = encoder_layerdrop
        lowercase__ : Union[str, Any] = decoder_layerdrop
        lowercase__ : Tuple = activation_function
        lowercase__ : str = init_std
        lowercase__ : Tuple = use_cache
        # Informer
        lowercase__ : Union[str, Any] = attention_type
        lowercase__ : Union[str, Any] = sampling_factor
        lowercase__ : Tuple = distil
        super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )

    @property
    def snake_case ( self : str ):
        # Total number of per-timestep input features fed to the model.
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
81
0
# Class-conditional image generation pipeline (DiT-style: Transformer denoiser
# + VAE decoder + scheduler), with classifier-free guidance.
# NOTE(review): identifiers are machine-mangled — all method parameters share
# the name SCREAMING_SNAKE_CASE (a SyntaxError as written), `_A` stands in for
# the original argument names, and the bodies reference un-mangled locals
# (`latents`, `noise_pred`, `timesteps`, ...) the mangling removed. Restore
# original identifiers before running; code below is kept token-identical.
from typing import Dict, List, Optional, Tuple, Union

import torch

from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class snake_case__(a__ ):
    """simple docstring"""

    def __init__( self : str , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[int] = None , ):
        super().__init__()
        self.register_modules(transformer=_A , vae=_A , scheduler=_A )
        # create a imagenet -> id dictionary for easier use
        lowercase__ : Union[str, Any] = {}
        if idalabel is not None:
            # each id2label value may hold several comma-separated label names
            for key, value in idalabel.items():
                for label in value.split("," ):
                    lowercase__ : Tuple = int(_A )
        lowercase__ : Dict = dict(sorted(self.labels.items() ) )

    def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ):
        # Map human-readable label name(s) to class id(s); raises on unknown
        # labels so the caller gets the full list of valid names.
        if not isinstance(_A , _A ):
            lowercase__ : Tuple = list(_A )
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""" )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__( self : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple = 4.0 , SCREAMING_SNAKE_CASE : Optional[Any] = None , SCREAMING_SNAKE_CASE : Tuple = 50 , SCREAMING_SNAKE_CASE : Dict = "pil" , SCREAMING_SNAKE_CASE : Tuple = True , ):
        # Denoising loop: sample initial latents, run the transformer per
        # scheduler timestep (doubled batch for classifier-free guidance when
        # guidance_scale > 1, with class id 1000 as the null/unconditional
        # label), then decode the final latents through the VAE.
        lowercase__ : str = len(_A )
        lowercase__ : List[Any] = self.transformer.config.sample_size
        lowercase__ : Dict = self.transformer.config.in_channels
        lowercase__ : str = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_A , device=self.device , dtype=self.transformer.dtype , )
        lowercase__ : str = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
        lowercase__ : Optional[Any] = torch.tensor(_A , device=self.device ).reshape(-1 )
        lowercase__ : str = torch.tensor([1_000] * batch_size , device=self.device )
        lowercase__ : Dict = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
        # set step values
        self.scheduler.set_timesteps(_A )
        for t in self.progress_bar(self.scheduler.timesteps ):
            if guidance_scale > 1:
                # keep the two guidance halves identical going into the model
                lowercase__ : int = latent_model_input[: len(_A ) // 2]
                lowercase__ : int = torch.cat([half, half] , dim=0 )
            lowercase__ : Union[str, Any] = self.scheduler.scale_model_input(_A , _A )
            lowercase__ : Union[str, Any] = t
            if not torch.is_tensor(_A ):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                lowercase__ : Union[str, Any] = latent_model_input.device.type == 'mps'
                if isinstance(_A , _A ):
                    lowercase__ : List[str] = torch.floataa if is_mps else torch.floataa
                else:
                    lowercase__ : Union[str, Any] = torch.intaa if is_mps else torch.intaa
                lowercase__ : Optional[Any] = torch.tensor([timesteps] , dtype=_A , device=latent_model_input.device )
            elif len(timesteps.shape ) == 0:
                lowercase__ : Any = timesteps[None].to(latent_model_input.device )
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            lowercase__ : Dict = timesteps.expand(latent_model_input.shape[0] )
            # predict noise model_output
            lowercase__ : Optional[int] = self.transformer(
                _A , timestep=_A , class_labels=_A ).sample
            # perform guidance
            if guidance_scale > 1:
                lowercase__ : Any = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                lowercase__ : Optional[Any] = torch.split(_A , len(_A ) // 2 , dim=0 )
                lowercase__ : str = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                lowercase__ : int = torch.cat([half_eps, half_eps] , dim=0 )
                lowercase__ : List[Any] = torch.cat([eps, rest] , dim=1 )
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                lowercase__ : Any = torch.split(_A , _A , dim=1 )
            else:
                lowercase__ : Optional[int] = noise_pred
            # compute previous image: x_t -> x_t-1
            lowercase__ : Tuple = self.scheduler.step(_A , _A , _A ).prev_sample
        if guidance_scale > 1:
            lowercase__ : Union[str, Any] = latent_model_input.chunk(2 , dim=0 )
        else:
            lowercase__ : int = latent_model_input
        lowercase__ : List[str] = 1 / self.vae.config.scaling_factor * latents
        lowercase__ : Dict = self.vae.decode(_A ).sample
        lowercase__ : int = (samples / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        lowercase__ : List[str] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            lowercase__ : List[str] = self.numpy_to_pil(_A )
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=_A )
716
# Conversion script: old-structure (XLM)ProphetNet checkpoints -> current
# Transformers layout, by walking each missing key and copying weights from
# the old model.
# NOTE(review): identifiers are machine-mangled — the converter's two
# parameters share one name (`lowerCamelCase__`, a SyntaxError as written) and
# the body references un-mangled locals (`prophet`, `mapping`, `old_model`,
# ...) the mangling removed. Also note the two bare
# `param.weight.shape == ...` comparison statements below: they are no-ops as
# written — presumably each was an `assert` in the original. Code below is
# kept token-identical.
import argparse

from torch import nn

# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
    ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
    XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)

from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging


lowerCAmelCase__ = logging.get_logger(__name__)
logging.set_verbosity_info()


def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
    # Load old and new models (XLM variant chosen by checkpoint path), then
    # resolve every key the new model reports as missing by navigating both
    # module trees in parallel via the old->new attribute-name mapping.
    if "xprophetnet" in prophetnet_checkpoint_path:
        lowercase__ : int = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCamelCase__ )
        lowercase__ , lowercase__ : Any = XLMProphetNetForConditionalGeneration.from_pretrained(
            lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
    else:
        lowercase__ : List[str] = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCamelCase__ )
        lowercase__ , lowercase__ : Optional[int] = ProphetNetForConditionalGeneration.from_pretrained(
            lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
    # attention projections stored fused as in_proj_* in the old model
    lowercase__ : int = ["key_proj", "value_proj", "query_proj"]
    # new attribute name -> old attribute name
    lowercase__ : str = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }
    for key in loading_info["missing_keys"]:
        lowercase__ : Union[str, Any] = key.split("." )
        if attributes[0] == "lm_head":
            lowercase__ : Tuple = prophet
            lowercase__ : Tuple = prophet_old
        else:
            lowercase__ : Tuple = prophet.prophetnet
            lowercase__ : List[str] = prophet_old.model
        lowercase__ : int = False
        for attribute in attributes:
            if attribute in mapping:
                lowercase__ : int = mapping[attribute]
                if not hasattr(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) > 0:
                    lowercase__ : Dict = attribute
                elif hasattr(lowerCamelCase__ , lowerCamelCase__ ):
                    lowercase__ : Optional[Any] = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                lowercase__ : Any = old_model.weight
                logger.info(F"""{attribute} is initialized.""" )
                lowercase__ : str = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                lowercase__ : Tuple = old_model.bias
                logger.info(F"""{attribute} is initialized""" )
                lowercase__ : str = True
                break
            elif attribute in special_keys and hasattr(lowerCamelCase__ , "in_proj_weight" ):
                # Split the fused q/k/v projection into the three new modules.
                lowercase__ : str = old_model.in_proj_weight.shape[0] // 3
                lowercase__ : Any = getattr(lowerCamelCase__ , lowerCamelCase__ )
                # NOTE(review): the next two lines are bare comparisons with no
                # effect — presumably they were meant to be asserts.
                param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    lowercase__ : List[str] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    lowercase__ : str = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
                elif attribute == "key_proj":
                    lowercase__ : List[str] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    lowercase__ : Any = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    lowercase__ : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    lowercase__ : Union[str, Any] = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                lowercase__ : Tuple = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                lowercase__ : List[Any] = nn.Parameter(old_model.embed_positions.weight[:512, :] )
                lowercase__ : Union[str, Any] = True
                break
            # descend one level in both module trees
            if attribute.isdigit():
                lowercase__ : str = model[int(lowerCamelCase__ )]
                lowercase__ : Union[str, Any] = old_model[int(lowerCamelCase__ )]
            else:
                lowercase__ : int = getattr(lowerCamelCase__ , lowerCamelCase__ )
                if old_attribute == "":
                    lowercase__ : str = old_model
                else:
                    if not hasattr(lowerCamelCase__ , lowerCamelCase__ ):
                        raise ValueError(F"""{old_model} does not have {old_attribute}""" )
                    lowercase__ : int = getattr(lowerCamelCase__ , lowerCamelCase__ )
        if not is_key_init:
            raise ValueError(F"""{key} was not correctly initialized!""" )
    print(F"""Saving model to {pytorch_dump_folder_path}""" )
    prophet.save_pretrained(lowerCamelCase__ )


if __name__ == "__main__":
    lowerCAmelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    lowerCAmelCase__ = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
81
0
from math import factorial


def solution(num: int = 100) -> int:
    """Project Euler problem 20: sum of the decimal digits of num!.

    Args:
        num: Non-negative integer whose factorial's digits are summed.
             Defaults to 100 (the original problem statement).

    Returns:
        The digit sum of factorial(num).

    Raises:
        ValueError: if num is negative (propagated from math.factorial).

    >>> solution(10)
    27
    """
    # Fix: the mangled original mapped the undefined name `_lowerCamelCase`
    # over the digits and also passed it to factorial(), ignoring its own
    # parameter; the `__main__` guard below additionally calls `solution`,
    # which did not exist. Map `int` over the digits of factorial(num).
    return sum(map(int, str(factorial(num))))


if __name__ == "__main__":
    print(solution(int(input('''Enter the Number: ''').strip())))
717
import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class snake_case__(_UpperCamelCase , unittest.TestCase ): """simple docstring""" lowercase_ = GPTaTokenizer lowercase_ = GPTaTokenizerFast lowercase_ = True lowercase_ = {"""add_prefix_space""": True} lowercase_ = False def snake_case ( self : Any ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase__ : Union[str, Any] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>", ] lowercase__ : Optional[Any] = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) ) lowercase__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] lowercase__ : List[str] = {"unk_token": "<unk>"} lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) lowercase__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(SCREAMING_SNAKE_CASE ) ) def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : int ): kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ) def snake_case ( self : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ): kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ) def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Dict ): lowercase__ : List[str] 
= "lower newer" lowercase__ : Optional[Any] = "lower newer" return input_text, output_text def snake_case ( self : Any ): lowercase__ : Dict = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowercase__ : Dict = "lower newer" lowercase__ : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"] lowercase__ : Optional[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowercase__ : Any = tokens + [tokenizer.unk_token] lowercase__ : str = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def snake_case ( self : Optional[Any] ): if not self.test_rust_tokenizer: return lowercase__ : Dict = self.get_tokenizer() lowercase__ : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE ) lowercase__ : int = "lower newer" # Testing tokenization lowercase__ : str = tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE ) lowercase__ : int = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Testing conversion to ids without special tokens lowercase__ : Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE ) lowercase__ : Dict = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Testing conversion to ids with special tokens lowercase__ : List[str] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE ) lowercase__ : List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # 
Testing the unknown token lowercase__ : List[Any] = tokens + [rust_tokenizer.unk_token] lowercase__ : Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def snake_case ( self : str , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[Any] ): # It's very difficult to mix/test pretokenization with byte-level # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : int=15 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowercase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # Simple input lowercase__ : Dict = "This is a simple input" lowercase__ : List[str] = ["This is a simple input 1", "This is a simple input 2"] lowercase__ : Union[str, Any] = ("This is a simple input", "This is a pair") lowercase__ : Optional[int] = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" ) # Simple input self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" ) # Simple input self.assertRaises( SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , ) # Pair input self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" ) # Pair input self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , 
SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" ) # Pair input self.assertRaises( SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , ) def snake_case ( self : Any ): lowercase__ : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" ) # Simple input lowercase__ : Optional[int] = "This is a simple input" lowercase__ : List[str] = ["This is a simple input looooooooong", "This is a simple input"] lowercase__ : List[Any] = ("This is a simple input", "This is a pair") lowercase__ : Optional[Any] = [ ("This is a simple input loooooong", "This is a simple input"), ("This is a simple pair loooooong", "This is a simple pair"), ] lowercase__ : Any = tokenizer.pad_token_id lowercase__ : Dict = tokenizer(SCREAMING_SNAKE_CASE , padding="max_length" , max_length=30 , return_tensors="np" ) lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="np" ) lowercase__ : List[str] = tokenizer(*SCREAMING_SNAKE_CASE , padding="max_length" , max_length=60 , return_tensors="np" ) lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="np" ) # s # test single string max_length padding self.assertEqual(out_s["input_ids"].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s["input_ids"] ) self.assertTrue(0 in out_s["attention_mask"] ) # s2 # test automatic padding self.assertEqual(out_sa["input_ids"].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["input_ids"][0] ) self.assertFalse(0 in out_sa["attention_mask"][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["input_ids"][1] ) self.assertTrue(0 in out_sa["attention_mask"][1] ) # p # test single pair max_length padding self.assertEqual(out_p["input_ids"].shape[-1] , 60 ) 
self.assertTrue(pad_token_id in out_p["input_ids"] ) self.assertTrue(0 in out_p["attention_mask"] ) # p2 # test automatic padding pair self.assertEqual(out_pa["input_ids"].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["input_ids"][0] ) self.assertFalse(0 in out_pa["attention_mask"][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["input_ids"][1] ) self.assertTrue(0 in out_pa["attention_mask"][1] ) def snake_case ( self : str ): lowercase__ : List[str] = "$$$" lowercase__ : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = "This is a simple input" lowercase__ : Dict = ["This is a simple input 1", "This is a simple input 2"] lowercase__ : Optional[int] = tokenizer.bos_token_id lowercase__ : List[Any] = tokenizer(SCREAMING_SNAKE_CASE ) lowercase__ : int = tokenizer(SCREAMING_SNAKE_CASE ) self.assertEqual(out_s.input_ids[0] , SCREAMING_SNAKE_CASE ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) lowercase__ : List[Any] = tokenizer.decode(out_s.input_ids ) lowercase__ : List[str] = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , SCREAMING_SNAKE_CASE ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def snake_case ( self : Optional[int] ): pass def snake_case ( self : Tuple ): # TODO: change to self.get_tokenizers() when the fast version is implemented lowercase__ : int = [self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE )] for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): lowercase__ : str = "Encode this." lowercase__ : List[Any] = "This one too please." 
lowercase__ : Dict = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE ) encoded_sequence += tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE ) lowercase__ : Dict = tokenizer.encode_plus( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_special_tokens_mask=SCREAMING_SNAKE_CASE , ) lowercase__ : Tuple = encoded_sequence_dict["input_ids"] lowercase__ : int = encoded_sequence_dict["special_tokens_mask"] self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) ) lowercase__ : List[str] = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(SCREAMING_SNAKE_CASE ) ] lowercase__ : Any = [x for x in filtered_sequence if x is not None] self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @require_tokenizers class snake_case__(unittest.TestCase ): """simple docstring""" def snake_case ( self : Union[str, Any] ): # More context: # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1 # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519 # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439 lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE ) lowercase__ : Optional[Any] = "A photo of a cat" lowercase__ : Tuple = tokenizer.encode( SCREAMING_SNAKE_CASE , ) self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] ) tokenizer.save_pretrained("test_opt" ) lowercase__ : int = AutoTokenizer.from_pretrained("./test_opt" ) lowercase__ : Dict = tokenizer.encode( SCREAMING_SNAKE_CASE , ) self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] ) def snake_case ( self : Union[str, Any] ): lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=SCREAMING_SNAKE_CASE ) lowercase__ : int = "A photo of a cat" lowercase__ : Tuple = tokenizer.encode( SCREAMING_SNAKE_CASE , ) # Same as above 
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] ) @unittest.skip("This test is failing because of a bug in the fast tokenizer" ) def snake_case ( self : Tuple ): lowercase__ : str = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE ) lowercase__ : Optional[Any] = "bos" lowercase__ : List[Any] = tokenizer.get_vocab()["bos"] lowercase__ : Optional[Any] = "A photo of a cat" lowercase__ : Union[str, Any] = tokenizer.encode( SCREAMING_SNAKE_CASE , ) # We changed the bos token self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] ) tokenizer.save_pretrained("./tok" ) lowercase__ : Any = AutoTokenizer.from_pretrained("./tok" ) self.assertTrue(tokenizer.is_fast ) lowercase__ : Tuple = tokenizer.encode( SCREAMING_SNAKE_CASE , ) self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
81
0
from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def __lowerCamelCase ( ): """simple docstring""" lowercase__ : Optional[int] = { '''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''], '''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''], '''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7], } lowercase__ : Optional[int] = Dataset.from_dict(a_ ) return dataset class snake_case__(_UpperCAmelCase ): """simple docstring""" def snake_case ( self : Tuple ): lowercase__ : Any = get_dataset() lowercase__ : int = make_duplicate_clusters(lowerCamelCase_ , 0.85 ) self.assertEqual(len(duplicate_clusters[0] ) , 2 ) def snake_case ( self : List[str] ): lowercase__ : List[Any] = get_dataset() lowercase__ : Optional[Any] = deduplicate_dataset(lowerCamelCase_ ) self.assertEqual(len(lowerCamelCase_ ) , 2 ) print(lowerCamelCase_ ) self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 ) self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , lowerCamelCase_ )
718
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ = { '''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimesformerModel''', '''TimesformerForVideoClassification''', '''TimesformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timesformer import ( TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimesformerForVideoClassification, TimesformerModel, TimesformerPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
81
0
'''simple docstring''' import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class snake_case__(_UpperCAmelCase ): """simple docstring""" lowercase_ = ['image_processor', 'tokenizer'] lowercase_ = 'OwlViTImageProcessor' lowercase_ = ('CLIPTokenizer', 'CLIPTokenizerFast') def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Optional[int]=None , **SCREAMING_SNAKE_CASE : List[str] ): lowercase__ : Tuple = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __UpperCamelCase , ) lowercase__ : List[str] = kwargs.pop("feature_extractor" ) lowercase__ : int = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(__UpperCamelCase , __UpperCamelCase ) def __call__( self : str , SCREAMING_SNAKE_CASE : Optional[Any]=None , SCREAMING_SNAKE_CASE : List[str]=None , SCREAMING_SNAKE_CASE : Optional[Any]=None , SCREAMING_SNAKE_CASE : Any="max_length" , SCREAMING_SNAKE_CASE : List[str]="np" , **SCREAMING_SNAKE_CASE : Dict ): if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none." 
) if text is not None: if isinstance(__UpperCamelCase , __UpperCamelCase ) or (isinstance(__UpperCamelCase , __UpperCamelCase ) and not isinstance(text[0] , __UpperCamelCase )): lowercase__ : Dict = [self.tokenizer(__UpperCamelCase , padding=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase )] elif isinstance(__UpperCamelCase , __UpperCamelCase ) and isinstance(text[0] , __UpperCamelCase ): lowercase__ : Tuple = [] # Maximum number of queries across batch lowercase__ : str = max([len(__UpperCamelCase ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(__UpperCamelCase ) != max_num_queries: lowercase__ : Tuple = t + [" "] * (max_num_queries - len(__UpperCamelCase )) lowercase__ : Optional[Any] = self.tokenizer(__UpperCamelCase , padding=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase ) encodings.append(__UpperCamelCase ) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings" ) if return_tensors == "np": lowercase__ : Tuple = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) lowercase__ : int = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp lowercase__ : Optional[Any] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) lowercase__ : str = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch lowercase__ : Tuple = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 ) lowercase__ : List[Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf lowercase__ : int = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 ) lowercase__ : List[Any] = 
tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 ) else: raise ValueError("Target return tensor type could not be returned" ) lowercase__ : Tuple = BatchEncoding() lowercase__ : Any = input_ids lowercase__ : List[str] = attention_mask if query_images is not None: lowercase__ : List[str] = BatchEncoding() lowercase__ : List[str] = self.image_processor( __UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase ).pixel_values lowercase__ : Optional[int] = query_pixel_values if images is not None: lowercase__ : List[str] = self.image_processor(__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase ) if text is not None and images is not None: lowercase__ : Tuple = image_features.pixel_values return encoding elif query_images is not None and images is not None: lowercase__ : Tuple = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__UpperCamelCase ) , tensor_type=__UpperCamelCase ) def snake_case ( self : Optional[int] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ): return self.image_processor.post_process(*__UpperCamelCase , **__UpperCamelCase ) def snake_case ( self : Optional[Any] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Any ): return self.image_processor.post_process_object_detection(*__UpperCamelCase , **__UpperCamelCase ) def snake_case ( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : List[str] ): return self.image_processor.post_process_image_guided_detection(*__UpperCamelCase , **__UpperCamelCase ) def snake_case ( self : Optional[int] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Union[str, Any] ): return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase ) def snake_case ( self : Any , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Dict ): return 
self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase ) @property def snake_case ( self : Union[str, Any] ): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __UpperCamelCase , ) return self.image_processor_class @property def snake_case ( self : Dict ): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __UpperCamelCase , ) return self.image_processor
719
from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class snake_case__: """simple docstring""" def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int=13 , SCREAMING_SNAKE_CASE : Union[str, Any]=30 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Optional[Any]=3 , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Optional[Any]=True , SCREAMING_SNAKE_CASE : List[str]=32 , SCREAMING_SNAKE_CASE : Optional[int]=2 , SCREAMING_SNAKE_CASE : str=4 , SCREAMING_SNAKE_CASE : List[Any]=37 , SCREAMING_SNAKE_CASE : Tuple="gelu" , SCREAMING_SNAKE_CASE : List[str]=0.1 , SCREAMING_SNAKE_CASE : List[Any]=0.1 , SCREAMING_SNAKE_CASE : int=10 , SCREAMING_SNAKE_CASE : List[str]=0.02 , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : str=0.6 , SCREAMING_SNAKE_CASE : Optional[Any]=None , ): lowercase__ : Union[str, Any] = parent lowercase__ : Optional[int] = batch_size lowercase__ : Union[str, Any] = image_size lowercase__ : List[Any] = patch_size lowercase__ : Any = num_channels lowercase__ : Optional[int] = is_training lowercase__ : Dict = use_labels lowercase__ : Any = hidden_size lowercase__ : List[Any] = num_hidden_layers lowercase__ : Union[str, Any] = num_attention_heads lowercase__ : Dict = intermediate_size 
lowercase__ : Optional[int] = hidden_act lowercase__ : Union[str, Any] = hidden_dropout_prob lowercase__ : Union[str, Any] = attention_probs_dropout_prob lowercase__ : List[Any] = type_sequence_label_size lowercase__ : Any = initializer_range lowercase__ : Optional[int] = mask_ratio lowercase__ : Union[str, Any] = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowercase__ : List[Any] = (image_size // patch_size) ** 2 lowercase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def snake_case ( self : int ): lowercase__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ : str = None if self.use_labels: lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : Optional[Any] = self.get_config() return config, pixel_values, labels def snake_case ( self : Tuple ): return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def snake_case ( self : str , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ): lowercase__ : Tuple = TFViTMAEModel(config=SCREAMING_SNAKE_CASE ) lowercase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] ): lowercase__ : Union[str, Any] = TFViTMAEForPreTraining(SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE ) # expected sequence length = num_patches lowercase__ : List[str] = (self.image_size // self.patch_size) ** 2 lowercase__ : List[Any] = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images lowercase__ : Dict = 1 lowercase__ : List[Any] = TFViTMAEForPreTraining(SCREAMING_SNAKE_CASE ) lowercase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def snake_case ( self : Optional[int] ): lowercase__ : int = self.prepare_config_and_inputs() ((lowercase__) , (lowercase__) , (lowercase__)) : Dict = config_and_inputs lowercase__ : str = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ): """simple docstring""" lowercase_ = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () lowercase_ = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {} lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False def snake_case ( self : List[str] ): lowercase__ : List[Any] = TFViTMAEModelTester(self ) lowercase__ : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 ) def 
snake_case ( self : Tuple ): self.config_tester.run_common_tests() @unittest.skip(reason="ViTMAE does not use inputs_embeds" ) def snake_case ( self : Union[str, Any] ): pass def snake_case ( self : Optional[int] ): lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : List[Any] = model_class(SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) lowercase__ : List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , tf.keras.layers.Layer ) ) def snake_case ( self : Optional[Any] ): lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE ) lowercase__ : Dict = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : Union[str, Any] = [*signature.parameters.keys()] lowercase__ : List[str] = ["pixel_values"] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE ) def snake_case ( self : Optional[Any] ): lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) def snake_case ( self : Optional[int] ): lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE ) def snake_case ( self : Optional[Any] ): # make the mask reproducible np.random.seed(2 ) lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : List[Any] = int((config.image_size // config.patch_size) ** 2 ) lowercase__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowercase__ : Optional[Any] = 
model_class(SCREAMING_SNAKE_CASE ) lowercase__ : int = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE ) lowercase__ : Any = copy.deepcopy(self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE ) lowercase__ : Union[str, Any] = outputs_dict[0].numpy() lowercase__ : Optional[int] = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 ) def snake_case ( self : str ): # make the mask reproducible np.random.seed(2 ) lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Optional[Any] = int((config.image_size // config.patch_size) ** 2 ) lowercase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(SCREAMING_SNAKE_CASE : Optional[int] ): lowercase__ : Tuple = {} for k, v in inputs_dict.items(): if tf.is_tensor(SCREAMING_SNAKE_CASE ): lowercase__ : Any = v.numpy() else: lowercase__ : List[Any] = np.array(SCREAMING_SNAKE_CASE ) return inputs_np_dict for model_class in self.all_model_classes: lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE ) lowercase__ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowercase__ : Any = prepare_numpy_arrays(SCREAMING_SNAKE_CASE ) lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE ) lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE ) self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] ): # make masks reproducible np.random.seed(2 ) lowercase__ : Optional[int] = int((tf_model.config.image_size // 
tf_model.config.patch_size) ** 2 ) lowercase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowercase__ : Union[str, Any] = tf.constant(SCREAMING_SNAKE_CASE ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowercase__ : Optional[int] = tf_noise super().check_pt_tf_models(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def snake_case ( self : str ): # make mask reproducible np.random.seed(2 ) lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : int = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(SCREAMING_SNAKE_CASE ) if module_member_name.endswith("MainLayer" ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. and module_member_name[: -len("MainLayer" )] == model_class.__name__[: -len("Model" )] for module_member in (getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ),) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(SCREAMING_SNAKE_CASE , "_keras_serializable" , SCREAMING_SNAKE_CASE ) } lowercase__ : List[str] = int((config.image_size // config.patch_size) ** 2 ) lowercase__ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowercase__ : str = tf.convert_to_tensor(SCREAMING_SNAKE_CASE ) inputs_dict.update({"noise": noise} ) for main_layer_class in tf_main_layer_classes: lowercase__ : Tuple = main_layer_class(SCREAMING_SNAKE_CASE ) lowercase__ : Optional[Any] = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } lowercase__ : Tuple = tf.keras.Model(SCREAMING_SNAKE_CASE , outputs=main_layer(SCREAMING_SNAKE_CASE ) ) lowercase__ : str = model(SCREAMING_SNAKE_CASE ) with 
tempfile.TemporaryDirectory() as tmpdirname: lowercase__ : str = os.path.join(SCREAMING_SNAKE_CASE , "keras_model.h5" ) model.save(SCREAMING_SNAKE_CASE ) lowercase__ : List[Any] = tf.keras.models.load_model( SCREAMING_SNAKE_CASE , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(SCREAMING_SNAKE_CASE , tf.keras.Model ) lowercase__ : Dict = model(SCREAMING_SNAKE_CASE ) self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def snake_case ( self : Optional[int] ): # make mask reproducible np.random.seed(2 ) lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 ) lowercase__ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE ) if model_class.__name__ == "TFViTMAEModel": lowercase__ : str = outputs.last_hidden_state.numpy() lowercase__ : Optional[Any] = 0 else: lowercase__ : Optional[Any] = outputs.logits.numpy() lowercase__ : Optional[int] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(SCREAMING_SNAKE_CASE , saved_model=SCREAMING_SNAKE_CASE ) lowercase__ : List[str] = model_class.from_pretrained(SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE ) if model_class.__name__ == "TFViTMAEModel": lowercase__ : Optional[int] = after_outputs["last_hidden_state"].numpy() lowercase__ : Optional[int] = 0 else: lowercase__ : str = after_outputs["logits"].numpy() lowercase__ : Tuple = 0 lowercase__ : Optional[Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(SCREAMING_SNAKE_CASE , 1E-5 ) def 
snake_case ( self : List[Any] ): # make mask reproducible np.random.seed(2 ) lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : List[str] = int((config.image_size // config.patch_size) ** 2 ) lowercase__ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowercase__ : Tuple = model_class(SCREAMING_SNAKE_CASE ) lowercase__ : Dict = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowercase__ : int = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE ) lowercase__ : str = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(SCREAMING_SNAKE_CASE ) lowercase__ : int = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config lowercase__ : Any = model_class.from_config(model.config ) lowercase__ : Tuple = new_model(SCREAMING_SNAKE_CASE ) # Build model new_model.set_weights(model.get_weights() ) lowercase__ : Union[str, Any] = new_model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE ) self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @unittest.skip( reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." ) def snake_case ( self : List[Any] ): pass @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. 
See test_save_load" ) def snake_case ( self : str ): pass @slow def snake_case ( self : List[Any] ): lowercase__ : List[Any] = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224" ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( ): """simple docstring""" lowercase__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class snake_case__(unittest.TestCase ): """simple docstring""" @cached_property def snake_case ( self : Any ): return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None @slow def snake_case ( self : Union[str, Any] ): # make random mask reproducible across the PT and TF model np.random.seed(2 ) lowercase__ : Optional[Any] = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ) lowercase__ : Optional[Any] = self.default_image_processor lowercase__ : Union[str, Any] = prepare_img() lowercase__ : Tuple = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="tf" ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowercase__ : Union[str, Any] = ViTMAEConfig() lowercase__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) lowercase__ : List[str] = np.random.uniform(size=(1, num_patches) ) # forward pass lowercase__ : Optional[Any] = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE ) # verify the logits lowercase__ : List[str] = tf.convert_to_tensor([1, 196, 768] ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE ) lowercase__ : List[str] = tf.convert_to_tensor( [[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
81
0
import math def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" lowercase__ : Any = [True] * n lowercase__ : Tuple = False lowercase__ : int = False lowercase__ : List[str] = True for i in range(3 , int(n**0.5 + 1 ) , 2 ): lowercase__ : List[str] = i * 2 while index < n: lowercase__ : int = False lowercase__ : str = index + i lowercase__ : int = [2] for i in range(3 , lowerCamelCase__ , 2 ): if is_prime[i]: primes.append(lowerCamelCase__ ) return primes def __lowerCamelCase ( lowerCamelCase__ = 999_966_663_333 ): """simple docstring""" lowercase__ : int = math.floor(math.sqrt(lowerCamelCase__ ) ) + 100 lowercase__ : Dict = prime_sieve(lowerCamelCase__ ) lowercase__ : List[str] = 0 lowercase__ : Union[str, Any] = 0 lowercase__ : Optional[int] = primes[prime_index] while (last_prime**2) <= limit: lowercase__ : Optional[int] = primes[prime_index + 1] lowercase__ : int = last_prime**2 lowercase__ : Optional[int] = next_prime**2 # Get numbers divisible by lps(current) lowercase__ : Tuple = lower_bound + last_prime while upper_bound > current <= limit: matches_sum += current current += last_prime # Reset the upper_bound while (upper_bound - next_prime) > limit: upper_bound -= next_prime # Add the numbers divisible by ups(current) lowercase__ : Union[str, Any] = upper_bound - next_prime while current > lower_bound: matches_sum += current current -= next_prime # Remove the numbers divisible by both ups and lps lowercase__ : Optional[int] = 0 while upper_bound > current <= limit: if current <= lower_bound: # Increment the current number current += last_prime * next_prime continue if current > limit: break # Remove twice since it was added by both ups and lps matches_sum -= current * 2 # Increment the current number current += last_prime * next_prime # Setup for next pair lowercase__ : List[str] = next_prime prime_index += 1 return matches_sum if __name__ == "__main__": print(solution())
720
from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) # TODO Update this lowerCAmelCase__ = { '''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''', # See all ESM models at https://huggingface.co/models?filter=esm } class snake_case__(_UpperCamelCase ): """simple docstring""" lowercase_ = """esm""" def __init__( self : Any , SCREAMING_SNAKE_CASE : str=None , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : Tuple=768 , SCREAMING_SNAKE_CASE : Any=12 , SCREAMING_SNAKE_CASE : Any=12 , SCREAMING_SNAKE_CASE : Optional[int]=3_072 , SCREAMING_SNAKE_CASE : Optional[int]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=1_026 , SCREAMING_SNAKE_CASE : Tuple=0.02 , SCREAMING_SNAKE_CASE : str=1E-1_2 , SCREAMING_SNAKE_CASE : List[str]="absolute" , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Optional[int]=False , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Union[str, Any]=None , **SCREAMING_SNAKE_CASE : Union[str, Any] , ): super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , mask_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) lowercase__ : List[str] = vocab_size lowercase__ : int = hidden_size lowercase__ : Union[str, Any] = num_hidden_layers lowercase__ : List[str] = num_attention_heads lowercase__ : List[str] = intermediate_size lowercase__ : Union[str, Any] = hidden_dropout_prob lowercase__ : List[str] = attention_probs_dropout_prob lowercase__ : List[str] = max_position_embeddings lowercase__ : List[str] = initializer_range lowercase__ : Optional[Any] = layer_norm_eps lowercase__ : Optional[int] = position_embedding_type lowercase__ : Optional[int] = use_cache lowercase__ : 
Optional[int] = emb_layer_norm_before lowercase__ : List[str] = token_dropout lowercase__ : Optional[int] = is_folding_model if is_folding_model: if esmfold_config is None: logger.info("No esmfold_config supplied for folding model, using default values." ) lowercase__ : Dict = EsmFoldConfig() elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): lowercase__ : Optional[int] = EsmFoldConfig(**SCREAMING_SNAKE_CASE ) lowercase__ : Dict = esmfold_config if vocab_list is None: logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" ) lowercase__ : List[str] = get_default_vocab_list() else: lowercase__ : List[Any] = vocab_list else: lowercase__ : List[Any] = None lowercase__ : List[str] = None if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , SCREAMING_SNAKE_CASE ): raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" ) def snake_case ( self : List[str] ): lowercase__ : Optional[Any] = super().to_dict() if isinstance(self.esmfold_config , SCREAMING_SNAKE_CASE ): lowercase__ : Dict = self.esmfold_config.to_dict() return output @dataclass class snake_case__: """simple docstring""" lowercase_ = None lowercase_ = True lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = 0 lowercase_ = True lowercase_ = False lowercase_ = 1_2_8 lowercase_ = None def snake_case ( self : Optional[int] ): if self.trunk is None: lowercase__ : Dict = TrunkConfig() elif isinstance(self.trunk , SCREAMING_SNAKE_CASE ): lowercase__ : int = TrunkConfig(**self.trunk ) def snake_case ( self : Union[str, Any] ): lowercase__ : int = asdict(self ) lowercase__ : Any = self.trunk.to_dict() return output @dataclass class snake_case__: """simple docstring""" lowercase_ = 4_8 lowercase_ = 1_0_2_4 lowercase_ = 1_2_8 lowercase_ = 3_2 lowercase_ = 3_2 lowercase_ = 3_2 lowercase_ = 0 lowercase_ = 0 lowercase_ = False lowercase_ = 4 lowercase_ = 1_2_8 lowercase_ = None def 
snake_case ( self : Dict ): if self.structure_module is None: lowercase__ : str = StructureModuleConfig() elif isinstance(self.structure_module , SCREAMING_SNAKE_CASE ): lowercase__ : Optional[int] = StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got" f""" {self.sequence_state_dim} and {self.sequence_state_dim}.""" ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got" f""" {self.pairwise_state_dim} and {self.pairwise_state_dim}.""" ) lowercase__ : Union[str, Any] = self.sequence_state_dim // self.sequence_head_width lowercase__ : List[Any] = self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got" f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got" f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" ) if self.pairwise_state_dim % 2 != 0: raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" ) if self.dropout >= 0.4: raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" ) def snake_case ( self : Optional[Any] ): lowercase__ : int = asdict(self ) lowercase__ : Optional[int] = self.structure_module.to_dict() return output @dataclass class snake_case__: """simple docstring""" lowercase_ = 3_8_4 lowercase_ = 
1_2_8 lowercase_ = 1_6 lowercase_ = 1_2_8 lowercase_ = 1_2 lowercase_ = 4 lowercase_ = 8 lowercase_ = 0.1 lowercase_ = 8 lowercase_ = 1 lowercase_ = 2 lowercase_ = 7 lowercase_ = 1_0 lowercase_ = 1e-8 lowercase_ = 1e5 def snake_case ( self : Dict ): return asdict(self ) def __lowerCamelCase ( ): """simple docstring""" return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
81
0
from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class snake_case__(SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__( self : str , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int = None , SCREAMING_SNAKE_CASE : Any = None , SCREAMING_SNAKE_CASE : Tuple = None , SCREAMING_SNAKE_CASE : Optional[int] = False , SCREAMING_SNAKE_CASE : Tuple = False , SCREAMING_SNAKE_CASE : Optional[Any] = None , **SCREAMING_SNAKE_CASE : str , ): super().__init__( _lowercase , split=_lowercase , features=_lowercase , cache_dir=_lowercase , keep_in_memory=_lowercase , streaming=_lowercase , num_proc=_lowercase , **_lowercase , ) lowercase__ : Optional[Any] = path_or_paths if isinstance(_lowercase , _lowercase ) else {self.split: path_or_paths} lowercase__ : Optional[int] = Text( cache_dir=_lowercase , data_files=_lowercase , features=_lowercase , **_lowercase , ) def snake_case ( self : Optional[int] ): if self.streaming: lowercase__ : Optional[int] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: lowercase__ : Optional[int] = None lowercase__ : Optional[int] = None lowercase__ : Union[str, Any] = None lowercase__ : List[Any] = None self.builder.download_and_prepare( download_config=_lowercase , download_mode=_lowercase , verification_mode=_lowercase , base_path=_lowercase , num_proc=self.num_proc , ) lowercase__ : Optional[Any] = self.builder.as_dataset( split=self.split , verification_mode=_lowercase , in_memory=self.keep_in_memory ) return dataset
721
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''', # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class snake_case__(_UpperCamelCase ): """simple docstring""" lowercase_ = """deformable_detr""" lowercase_ = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : int=300 , SCREAMING_SNAKE_CASE : Any=1_024 , SCREAMING_SNAKE_CASE : Dict=6 , SCREAMING_SNAKE_CASE : Optional[int]=1_024 , SCREAMING_SNAKE_CASE : Optional[int]=8 , SCREAMING_SNAKE_CASE : str=6 , SCREAMING_SNAKE_CASE : Optional[int]=1_024 , SCREAMING_SNAKE_CASE : Optional[Any]=8 , SCREAMING_SNAKE_CASE : List[Any]=0.0 , SCREAMING_SNAKE_CASE : Tuple=True , SCREAMING_SNAKE_CASE : List[str]="relu" , SCREAMING_SNAKE_CASE : List[Any]=256 , SCREAMING_SNAKE_CASE : int=0.1 , SCREAMING_SNAKE_CASE : Optional[int]=0.0 , SCREAMING_SNAKE_CASE : List[str]=0.0 , SCREAMING_SNAKE_CASE : Tuple=0.02 , SCREAMING_SNAKE_CASE : Any=1.0 , SCREAMING_SNAKE_CASE : int=True , SCREAMING_SNAKE_CASE : str=False , SCREAMING_SNAKE_CASE : Optional[int]="sine" , SCREAMING_SNAKE_CASE : List[str]="resnet50" , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : Any=False , SCREAMING_SNAKE_CASE : Optional[Any]=4 , SCREAMING_SNAKE_CASE : List[str]=4 , SCREAMING_SNAKE_CASE : Tuple=4 , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Tuple=300 , SCREAMING_SNAKE_CASE : Optional[Any]=False , SCREAMING_SNAKE_CASE : Tuple=1 , SCREAMING_SNAKE_CASE : Any=5 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Optional[Any]=1 , 
SCREAMING_SNAKE_CASE : str=1 , SCREAMING_SNAKE_CASE : List[str]=5 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.25 , SCREAMING_SNAKE_CASE : str=False , **SCREAMING_SNAKE_CASE : Union[str, Any] , ): if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) lowercase__ : Optional[int] = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): lowercase__ : List[Any] = backbone_config.get("model_type" ) lowercase__ : Any = CONFIG_MAPPING[backbone_model_type] lowercase__ : str = config_class.from_dict(SCREAMING_SNAKE_CASE ) lowercase__ : int = use_timm_backbone lowercase__ : Optional[Any] = backbone_config lowercase__ : Union[str, Any] = num_channels lowercase__ : List[Any] = num_queries lowercase__ : List[Any] = max_position_embeddings lowercase__ : Union[str, Any] = d_model lowercase__ : Union[str, Any] = encoder_ffn_dim lowercase__ : Optional[Any] = encoder_layers lowercase__ : Optional[Any] = encoder_attention_heads lowercase__ : Optional[Any] = decoder_ffn_dim lowercase__ : List[Any] = decoder_layers lowercase__ : Optional[int] = decoder_attention_heads lowercase__ : str = dropout lowercase__ : Union[str, Any] = attention_dropout lowercase__ : List[str] = activation_dropout lowercase__ : Optional[Any] = activation_function lowercase__ : Optional[Any] = init_std lowercase__ : str = init_xavier_std lowercase__ : Any = encoder_layerdrop lowercase__ : int = auxiliary_loss lowercase__ : Dict = position_embedding_type lowercase__ : int = backbone lowercase__ : Optional[Any] = use_pretrained_backbone lowercase__ : List[Any] = dilation # deformable attributes lowercase__ : Dict = num_feature_levels 
lowercase__ : Optional[int] = encoder_n_points lowercase__ : Any = decoder_n_points lowercase__ : int = two_stage lowercase__ : int = two_stage_num_proposals lowercase__ : Union[str, Any] = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError("If two_stage is True, with_box_refine must be True." ) # Hungarian matcher lowercase__ : List[Any] = class_cost lowercase__ : Optional[int] = bbox_cost lowercase__ : Any = giou_cost # Loss coefficients lowercase__ : List[str] = mask_loss_coefficient lowercase__ : int = dice_loss_coefficient lowercase__ : Any = bbox_loss_coefficient lowercase__ : Any = giou_loss_coefficient lowercase__ : Optional[int] = eos_coefficient lowercase__ : int = focal_alpha lowercase__ : Dict = disable_custom_kernels super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @property def snake_case ( self : List[Any] ): return self.encoder_attention_heads @property def snake_case ( self : Union[str, Any] ): return self.d_model def snake_case ( self : str ): lowercase__ : List[str] = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: lowercase__ : int = self.backbone_config.to_dict() lowercase__ : Union[str, Any] = self.__class__.model_type return output
81
0
import os from argparse import ArgumentParser from typing import List import torch.utils.data from datasets import Dataset, IterableDataset from datasets.distributed import split_dataset_by_node lowerCAmelCase__ = 4 lowerCAmelCase__ = 3 class snake_case__(_lowercase ): """simple docstring""" pass def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" for shard in shards: for i in range(snake_case__ ): yield {"i": i, "shard": shard} def __lowerCamelCase ( ): """simple docstring""" lowercase__ : Any = int(os.environ["RANK"] ) lowercase__ : List[str] = int(os.environ["WORLD_SIZE"] ) lowercase__ : Tuple = ArgumentParser() parser.add_argument("--streaming" , type=snake_case__ ) parser.add_argument("--local_rank" , type=snake_case__ ) parser.add_argument("--num_workers" , type=snake_case__ , default=0 ) lowercase__ : Any = parser.parse_args() lowercase__ : Dict = args.streaming lowercase__ : Tuple = args.num_workers lowercase__ : int = {"shards": [F"""shard_{shard_idx}""" for shard_idx in range(snake_case__ )]} lowercase__ : List[str] = IterableDataset.from_generator(snake_case__ , gen_kwargs=snake_case__ ) if not streaming: lowercase__ : List[str] = Dataset.from_list(list(snake_case__ ) ) lowercase__ : Optional[int] = split_dataset_by_node(snake_case__ , rank=snake_case__ , world_size=snake_case__ ) lowercase__ : Any = torch.utils.data.DataLoader(snake_case__ , num_workers=snake_case__ ) lowercase__ : Optional[Any] = NUM_SHARDS * NUM_ITEMS_PER_SHARD lowercase__ : Dict = full_size // world_size expected_local_size += int(rank < (full_size % world_size) ) lowercase__ : Dict = sum(1 for _ in dataloader ) if local_size != expected_local_size: raise FailedTestError(F"""local_size {local_size} != expected_local_size {expected_local_size}""" ) if __name__ == "__main__": main()
700
from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging lowerCAmelCase__ = logging.get_logger(__name__) class snake_case__(_UpperCamelCase ): """simple docstring""" lowercase_ = ["""pixel_values"""] def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 255 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : int = 8 , **SCREAMING_SNAKE_CASE : Dict , ): super().__init__(**SCREAMING_SNAKE_CASE ) lowercase__ : str = do_rescale lowercase__ : Optional[Any] = rescale_factor lowercase__ : Any = do_pad lowercase__ : Optional[Any] = pad_size def snake_case ( self : str , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : Optional[int] ): return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None ): lowercase__ , lowercase__ : str = get_image_size(SCREAMING_SNAKE_CASE ) lowercase__ : Union[str, Any] = (old_height // size + 1) * size - old_height lowercase__ : List[Any] = (old_width // size + 1) * size - old_width return pad(SCREAMING_SNAKE_CASE , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=SCREAMING_SNAKE_CASE ) def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : ImageInput , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[float] = None , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE 
: Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE : Union[str, ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE : Dict , ): lowercase__ : int = do_rescale if do_rescale is not None else self.do_rescale lowercase__ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase__ : str = do_pad if do_pad is not None else self.do_pad lowercase__ : Optional[int] = pad_size if pad_size is not None else self.pad_size lowercase__ : Tuple = make_list_of_images(SCREAMING_SNAKE_CASE ) if not valid_images(SCREAMING_SNAKE_CASE ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) # All transformations expect numpy arrays. lowercase__ : Any = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images] if do_rescale: lowercase__ : Any = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images] if do_pad: lowercase__ : Tuple = [self.pad(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE ) for image in images] lowercase__ : Union[str, Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images] lowercase__ : Optional[Any] = {"pixel_values": images} return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
81
0
import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowercase__ : Optional[int] = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg" lowercase__ : Optional[int] = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ).convert("RGB" ) lowercase__ : List[Any] = transforms.Compose( [ transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize((0.48145466, 0.4578275, 0.40821073) , (0.26862954, 0.26130258, 0.27577711) ), ] ) lowercase__ : Optional[int] = transform(__SCREAMING_SNAKE_CASE ).unsqueeze(0 ).to(__SCREAMING_SNAKE_CASE ) return image def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" if "visual_encoder" in key: lowercase__ : Any = re.sub("visual_encoder*" , "vision_model.encoder" , __SCREAMING_SNAKE_CASE ) if "blocks" in key: lowercase__ : str = re.sub(R"blocks" , "layers" , __SCREAMING_SNAKE_CASE ) if "attn" in key: lowercase__ : int = re.sub(R"attn" , "self_attn" , __SCREAMING_SNAKE_CASE ) if "norm1" in key: lowercase__ : int = re.sub(R"norm1" , "layer_norm1" , __SCREAMING_SNAKE_CASE ) if "norm2" in key: lowercase__ : Dict = re.sub(R"norm2" , "layer_norm2" , __SCREAMING_SNAKE_CASE ) if "encoder.norm" in key: lowercase__ : int = re.sub(R"encoder.norm" , "post_layernorm" , __SCREAMING_SNAKE_CASE ) if "encoder.patch_embed.proj" in key: lowercase__ : List[str] = re.sub(R"encoder.patch_embed.proj" , 
"embeddings.patch_embedding" , __SCREAMING_SNAKE_CASE ) if "encoder.pos_embed" in key: lowercase__ : Optional[Any] = re.sub(R"encoder.pos_embed" , "embeddings.position_embedding" , __SCREAMING_SNAKE_CASE ) if "encoder.cls_token" in key: lowercase__ : Dict = re.sub(R"encoder.cls_token" , "embeddings.class_embedding" , __SCREAMING_SNAKE_CASE ) if "self_attn" in key: lowercase__ : Tuple = re.sub(R"self_attn.proj" , "self_attn.projection" , __SCREAMING_SNAKE_CASE ) return key @torch.no_grad() def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__=None ): """simple docstring""" if config_path is not None: lowercase__ : Tuple = BlipConfig.from_pretrained(__SCREAMING_SNAKE_CASE ) else: lowercase__ : Union[str, Any] = BlipConfig(projection_dim=512 , text_config={} , vision_config={} ) lowercase__ : Any = BlipForConditionalGeneration(__SCREAMING_SNAKE_CASE ).eval() lowercase__ : str = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth" lowercase__ : Any = blip_decoder(pretrained=__SCREAMING_SNAKE_CASE , image_size=384 , vit="base" ) lowercase__ : Optional[Any] = pt_model.eval() lowercase__ : List[str] = pt_model.state_dict() for key in modified_state_dict.copy(): lowercase__ : str = modified_state_dict.pop(__SCREAMING_SNAKE_CASE ) lowercase__ : Dict = rename_key(__SCREAMING_SNAKE_CASE ) lowercase__ : Any = value hf_model.load_state_dict(__SCREAMING_SNAKE_CASE ) lowercase__ : List[Any] = 384 lowercase__ : List[str] = load_demo_image(image_size=__SCREAMING_SNAKE_CASE , device="cpu" ) lowercase__ : List[Any] = BertTokenizer.from_pretrained("bert-base-uncased" ) lowercase__ : Any = tokenizer(["a picture of"] ).input_ids lowercase__ : Optional[int] = hf_model.generate(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert out[0].tolist() == [30_522, 1_037, 3_861, 1_997, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102] lowercase__ : int = hf_model.generate(__SCREAMING_SNAKE_CASE ) assert out[0].tolist() 
== [30_522, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102] if pytorch_dump_folder_path is not None: hf_model.save_pretrained(__SCREAMING_SNAKE_CASE ) # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth' lowercase__ : str = ( "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth" ) lowercase__ : Optional[int] = blip_vqa(pretrained=__SCREAMING_SNAKE_CASE , image_size=__SCREAMING_SNAKE_CASE , vit="base" ) vqa_model.eval() lowercase__ : List[str] = vqa_model.state_dict() for key in modified_state_dict.copy(): lowercase__ : Optional[Any] = modified_state_dict.pop(__SCREAMING_SNAKE_CASE ) lowercase__ : Optional[int] = rename_key(__SCREAMING_SNAKE_CASE ) lowercase__ : Union[str, Any] = value lowercase__ : Any = BlipForQuestionAnswering(__SCREAMING_SNAKE_CASE ) hf_vqa_model.load_state_dict(__SCREAMING_SNAKE_CASE ) lowercase__ : Tuple = ["How many dogs are in this image?"] lowercase__ : Optional[Any] = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors="pt" ).input_ids lowercase__ : Tuple = hf_vqa_model.generate(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) print(tokenizer.decode(answer[0] ) ) assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]" if pytorch_dump_folder_path is not None: hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa" ) lowercase__ : List[str] = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth" lowercase__ : Optional[int] = blip_itm(pretrained=__SCREAMING_SNAKE_CASE , image_size=__SCREAMING_SNAKE_CASE , vit="base" ) itm_model.eval() lowercase__ : Union[str, Any] = itm_model.state_dict() for key in modified_state_dict.copy(): lowercase__ : List[Any] = modified_state_dict.pop(__SCREAMING_SNAKE_CASE ) lowercase__ : int = rename_key(__SCREAMING_SNAKE_CASE ) lowercase__ : int = value lowercase__ : int = BlipForImageTextRetrieval(__SCREAMING_SNAKE_CASE ) lowercase__ : 
Tuple = ["A picture of a woman with a dog sitting in a beach"] lowercase__ : Any = tokenizer( __SCREAMING_SNAKE_CASE , return_tensors="pt" , padding="max_length" , truncation=__SCREAMING_SNAKE_CASE , max_length=35 , ).input_ids hf_itm_model.load_state_dict(__SCREAMING_SNAKE_CASE ) hf_itm_model.eval() lowercase__ : Union[str, Any] = hf_itm_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_itm_head=__SCREAMING_SNAKE_CASE ) lowercase__ : List[Any] = hf_itm_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_itm_head=__SCREAMING_SNAKE_CASE ) assert out[0].item() == 0.2110687494277954 assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45698845386505127 if pytorch_dump_folder_path is not None: hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm" ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') lowerCAmelCase__ = parser.parse_args() convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
701
import argparse
import json

from tqdm import tqdm


def __lowerCamelCase():
    """Parse raw DPR biencoder training data into two files used by RAG evaluation.

    Reads the JSON at ``--src_path`` and writes:
      * ``--evaluation_set``: one question per line;
      * ``--gold_data_path``: tab-separated titles of the positive contexts,
        one line per question.
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,  # BUG FIX: was the undefined name `lowerCamelCase__`; these are path strings
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    # BUG FIX: the guard previously called `main()`, which is not defined anywhere
    # in this module; call the actual entry point.
    __lowerCamelCase()
81
0
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch

from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS


logging.basicConfig(level=logging.DEBUG)
# BUG FIX: the module logger was assigned to the undefined-everywhere name
# `lowerCAmelCase__` but used below as `logger`.
logger = logging.getLogger()


def _dump_articles(path, articles):
    """Write `articles` to `path`, newline-separated (helper for the tests below).

    BUG FIX: this helper was defined as `__lowerCamelCase` but every call site
    uses `_dump_articles`.
    """
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


# BUG FIX: all three constants were assigned to the same name `lowerCAmelCase__`
# (each overwriting the last) while the code below references T5_TINY / BART_TINY /
# MBART_TINY.
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks


class snake_case__(TestCasePlus):
    """End-to-end tests for the seq2seq `run_eval.py` / `run_eval_search.py` scripts.

    BUG FIX: the base class was the undefined `_UpperCamelCase`; `TestCasePlus`
    is imported above and provides `get_auto_remove_tmp_dir`.
    """

    def run_eval_tester(self, model):
        """Run `run_eval.py` on a one-article input file and check an output file appears."""
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()

        # BUG FIX: argv must be patched on the `sys` module, not on the model name.
        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    def test_run_eval(self):
        # NOTE(review): quick (non-slow) variant assumed to exercise the T5 tiny
        # model, matching the upstream transformers test — confirm if possible.
        self.run_eval_tester(T5_TINY)

    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        """Run the hyper-parameter search wrapper and check its stdout report."""
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        # BUG FIX: this dict was assigned to `lowercase__` but used below as `text`.
        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                # BUG FIX: was `extend(SCREAMING_SNAKE_CASE)` (undefined);
                # ROUGE_KEYS is imported above and otherwise unused.
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
702
import argparse
import logging
import os

import datasets
import tensorflow as tf

from transformers import AutoTokenizer


logger = logging.getLogger(__name__)


def parse_args():
    """Parse command-line options for the TFRecord-sharding script.

    BUG FIX: all four functions in this module were named `__lowerCamelCase`
    (each definition shadowing the previous one) while the call sites use
    `parse_args` / `tokenize_function` / `get_serialized_examples` / `main`;
    the argument `type=` / `default=` values were also replaced by the
    undefined name `lowerCamelCase__` and are restored from the defaults.
    """
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1_000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    """Return a closure that tokenizes the "text" column of a batch of examples."""

    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    """Serialize tokenized samples into `tf.train.Example` byte strings.

    BUG FIX: `intaa_list` / `tf.train.IntaaList` were digit-mangled forms of the
    real TensorFlow API names `int64_list` / `tf.train.Int64List`.
    """
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records


def main(args):
    """Tokenize, group and shard the dataset into TFRecord files."""
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1_000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)


if __name__ == "__main__":
    args = parse_args()
    main(args)
81
0
from collections.abc import Iterable from typing import Any class snake_case__: """simple docstring""" def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : int | None = None ): lowercase__ : Dict = value lowercase__ : Node | None = None # Added in order to delete a node easier lowercase__ : Node | None = None lowercase__ : Node | None = None def __repr__( self : List[str] ): from pprint import pformat if self.left is None and self.right is None: return str(self.value ) return pformat({f"""{self.value}""": (self.left, self.right)} , indent=1 ) class snake_case__: """simple docstring""" def __init__( self : Tuple , SCREAMING_SNAKE_CASE : Node | None = None ): lowercase__ : Union[str, Any] = root def __str__( self : Optional[int] ): return str(self.root ) def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : Node , SCREAMING_SNAKE_CASE : Node | None ): if new_children is not None: # reset its kids lowercase__ : Any = node.parent if node.parent is not None: # reset its parent if self.is_right(__A ): # If it is the right children lowercase__ : List[str] = new_children else: lowercase__ : Dict = new_children else: lowercase__ : Union[str, Any] = new_children def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Node ): if node.parent and node.parent.right: return node == node.parent.right return False def snake_case ( self : List[Any] ): return self.root is None def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : Optional[int] ): lowercase__ : List[str] = Node(__A ) # create a new Node if self.empty(): # if Tree is empty lowercase__ : List[str] = new_node # set its root else: # Tree is not empty lowercase__ : Any = self.root # from root if parent_node is None: return while True: # While we don't get to a leaf if value < parent_node.value: # We go left if parent_node.left is None: lowercase__ : Optional[int] = new_node # We insert the new node in a leaf break else: lowercase__ : int = parent_node.left else: if parent_node.right is None: lowercase__ : Union[str, 
Any] = new_node break else: lowercase__ : Dict = parent_node.right lowercase__ : int = parent_node def snake_case ( self : Optional[Any] , *SCREAMING_SNAKE_CASE : Tuple ): for value in values: self.__insert(__A ) def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : List[Any] ): if self.empty(): raise IndexError("Warning: Tree is empty! please use another." ) else: lowercase__ : str = self.root # use lazy evaluation here to avoid NoneType Attribute error while node is not None and node.value is not value: lowercase__ : List[Any] = node.left if value < node.value else node.right return node def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : Node | None = None ): if node is None: if self.root is None: return None lowercase__ : List[Any] = self.root if not self.empty(): while node.right is not None: lowercase__ : Any = node.right return node def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Node | None = None ): if node is None: lowercase__ : str = self.root if self.root is None: return None if not self.empty(): lowercase__ : int = self.root while node.left is not None: lowercase__ : str = node.left return node def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : int ): lowercase__ : Tuple = self.search(__A ) # Look for the node with that label if node is not None: if node.left is None and node.right is None: # If it has no children self.__reassign_nodes(__A , __A ) elif node.left is None: # Has only right children self.__reassign_nodes(__A , node.right ) elif node.right is None: # Has only left children self.__reassign_nodes(__A , node.left ) else: lowercase__ : Any = self.get_max( node.left ) # Gets the max value of the left branch self.remove(tmp_node.value ) # type: ignore lowercase__ : int = ( tmp_node.value # type: ignore ) # Assigns the value to the node to delete and keep tree structure def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : Node | None ): if node is not None: yield node # Preorder Traversal yield from 
self.preorder_traverse(node.left ) yield from self.preorder_traverse(node.right ) def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int]=None ): if traversal_function is None: return self.preorder_traverse(self.root ) else: return traversal_function(self.root ) def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : list , SCREAMING_SNAKE_CASE : Node | None ): if node: self.inorder(__A , node.left ) arr.append(node.value ) self.inorder(__A , node.right ) def snake_case ( self : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Node ): lowercase__ : list[int] = [] self.inorder(__A , __A ) # append all values to list using inorder traversal return arr[k - 1] def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" lowercase__ : Union[str, Any] = [] if curr_node is not None: lowercase__ : Union[str, Any] = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node] return node_list def __lowerCamelCase ( ): """simple docstring""" lowercase__ : Any = (8, 3, 6, 1, 10, 14, 13, 4, 7) lowercase__ : int = BinarySearchTree() for i in testlist: t.insert(snake_case_ ) # Prints all the elements of the list in order traversal print(snake_case_ ) if t.search(6 ) is not None: print("The value 6 exists" ) else: print("The value 6 doesn't exist" ) if t.search(-1 ) is not None: print("The value -1 exists" ) else: print("The value -1 doesn't exist" ) if not t.empty(): print("Max Value: " , t.get_max().value ) # type: ignore print("Min Value: " , t.get_min().value ) # type: ignore for i in testlist: t.remove(snake_case_ ) print(snake_case_ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
703
import inspect
import unittest

from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
    from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class ConvNextVaModelTester:
    """Builds tiny ConvNextV2 configs and dummy inputs for the tests below.

    BUG FIX: the obfuscation collapsed every parameter to the same name
    (`SCREAMING_SNAKE_CASE`, a SyntaxError) and all three module classes to
    `snake_case__` (shadowing each other); names are restored from the
    attribute assignments in the bodies and from the call sites
    (`ConvNextVaModelTester(self)` in `setUp`).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,  # NOTE(review): original value obfuscated; ConvNextV2 is not a decoder
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict


@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Unit tests for the ConvNextV2 model family.

    BUG FIX: the two base mixins were the undefined `_UpperCamelCase`;
    `ModelTesterMixin` / `PipelineTesterMixin` are imported above and otherwise
    unused. Class attributes were all obfuscated to `lowercase_` (shadowing
    each other) and are restored from the mixin contract.
    """

    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    # NOTE(review): five boolean flags were all obfuscated to `lowercase_ = False`;
    # these names follow the ModelTesterMixin convention — confirm against upstream.
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            # base/backbone models have no loss head, so skip them for training
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the standard COCO test fixture image used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9_996, 0.1_966, -0.4_386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
81
0
"""Flax RoFormer model tests: a tiny-config model tester plus slow integration checks."""

import unittest

import numpy as np

from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roformer.modeling_flax_roformer import (
        FlaxRoFormerForMaskedLM,
        FlaxRoFormerForMultipleChoice,
        FlaxRoFormerForQuestionAnswering,
        FlaxRoFormerForSequenceClassification,
        FlaxRoFormerForTokenClassification,
        FlaxRoFormerModel,
    )


class FlaxRoFormerModelTester(unittest.TestCase):
    """Builds a tiny RoFormer config and random inputs for the tests below.

    BUG FIX: the obfuscation collapsed every parameter to the same name
    (`SCREAMING_SNAKE_CASE`, a SyntaxError) and all module classes to
    `snake_case__`; names are restored from the attribute assignments in the
    body and from the call site `FlaxRoFormerModelTester(self)`.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,  # NOTE(review): original value obfuscated; these tests are encoder-only
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Common Flax model-tester suite for the RoFormer family.

    BUG FIX: the base mixin was the undefined `lowercase__`;
    `FlaxModelTesterMixin` is imported above and otherwise unused.
    """

    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            # from_pt=True: the published checkpoint is a PyTorch one
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50_000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
704
# Slow integration test: fine-tune a tiny warm-started BERT2BERT encoder-decoder
# on a 1% slice of CNN/DailyMail summarization data using Seq2SeqTrainer.
#
# NOTE(review): name obfuscation bound most values to `lowercase__`, yet the body
# reads `bertabert`, `tokenizer`, `train_dataset`, `trainer`, etc. — those reads
# have no visible binding here; verify against the original upstream file.
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available

if is_datasets_available():
    import datasets


class snake_case__(_UpperCamelCase ):
    """simple docstring"""

    @slow
    @require_torch
    def snake_case ( self : Any ):
        # Tiny checkpoints keep the test fast; this is a smoke test, not real training.
        lowercase__ : List[str] = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
        lowercase__ : int = BertTokenizer.from_pretrained("bert-base-uncased" )
        lowercase__ : str = bertabert.config.encoder.vocab_size
        # For BERT checkpoints, SEP/CLS double as EOS / decoder-start tokens.
        lowercase__ : List[str] = tokenizer.sep_token_id
        lowercase__ : Optional[Any] = tokenizer.cls_token_id
        lowercase__ : int = 128
        lowercase__ : str = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
        lowercase__ : Tuple = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
        # Keep only a handful of examples so training finishes quickly.
        lowercase__ : Tuple = train_dataset.select(range(32 ) )
        lowercase__ : Optional[int] = val_dataset.select(range(16 ) )
        lowercase__ : int = 4

        def _map_to_encoder_decoder_inputs(SCREAMING_SNAKE_CASE : Optional[Any] ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            lowercase__ : List[Any] = tokenizer(batch["article"] , padding="max_length" , truncation=SCREAMING_SNAKE_CASE , max_length=512 )
            lowercase__ : Dict = tokenizer(batch["highlights"] , padding="max_length" , truncation=SCREAMING_SNAKE_CASE , max_length=128 )
            lowercase__ : Tuple = inputs.input_ids
            lowercase__ : Optional[int] = inputs.attention_mask
            lowercase__ : int = outputs.input_ids
            lowercase__ : Dict = outputs.input_ids.copy()
            # Replace pad tokens with -100 so the loss ignores padded positions.
            lowercase__ : int = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            lowercase__ : List[Any] = outputs.attention_mask

            # Sanity-check that padding produced fixed-length sequences.
            assert all(len(SCREAMING_SNAKE_CASE ) == 512 for x in inputs.input_ids )
            assert all(len(SCREAMING_SNAKE_CASE ) == 128 for x in outputs.input_ids )

            return batch

        def _compute_metrics(SCREAMING_SNAKE_CASE : List[str] ):
            lowercase__ : Union[str, Any] = pred.label_ids
            lowercase__ : Dict = pred.predictions

            # all unnecessary tokens are removed
            lowercase__ : List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
            lowercase__ : str = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
            # Exact-match accuracy over fully decoded prediction strings.
            lowercase__ : Tuple = sum([int(pred_str[i] == label_str[i] ) for i in range(len(SCREAMING_SNAKE_CASE ) )] ) / len(SCREAMING_SNAKE_CASE )

            return {"accuracy": accuracy}

        # map train dataset
        lowercase__ : List[str] = train_dataset.map(
            _map_to_encoder_decoder_inputs ,
            batched=SCREAMING_SNAKE_CASE ,
            batch_size=SCREAMING_SNAKE_CASE ,
            remove_columns=["article", "highlights"] ,
        )
        train_dataset.set_format(
            type="torch" ,
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] ,
        )

        # same for validation dataset
        lowercase__ : Any = val_dataset.map(
            _map_to_encoder_decoder_inputs ,
            batched=SCREAMING_SNAKE_CASE ,
            batch_size=SCREAMING_SNAKE_CASE ,
            remove_columns=["article", "highlights"] ,
        )
        val_dataset.set_format(
            type="torch" ,
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] ,
        )

        lowercase__ : List[str] = self.get_auto_remove_tmp_dir()
        # Evaluate/log every 2 steps so both code paths run within the tiny budget.
        lowercase__ : int = SeqaSeqTrainingArguments(
            output_dir=SCREAMING_SNAKE_CASE ,
            per_device_train_batch_size=SCREAMING_SNAKE_CASE ,
            per_device_eval_batch_size=SCREAMING_SNAKE_CASE ,
            predict_with_generate=SCREAMING_SNAKE_CASE ,
            evaluation_strategy="steps" ,
            do_train=SCREAMING_SNAKE_CASE ,
            do_eval=SCREAMING_SNAKE_CASE ,
            warmup_steps=0 ,
            eval_steps=2 ,
            logging_steps=2 ,
        )

        # instantiate trainer
        lowercase__ : str = SeqaSeqTrainer(
            model=SCREAMING_SNAKE_CASE ,
            args=SCREAMING_SNAKE_CASE ,
            compute_metrics=_compute_metrics ,
            train_dataset=SCREAMING_SNAKE_CASE ,
            eval_dataset=SCREAMING_SNAKE_CASE ,
            tokenizer=SCREAMING_SNAKE_CASE ,
        )

        # start training
        trainer.train()
81
0
"""Lazy import structure for GPT-NeoX Japanese: configuration, tokenizer, and —
when torch is available — the PyTorch modeling classes.

Fixes: the import-structure dict was bound to a throwaway name and then
clobbered by the modeling-name list, leaving ``_import_structure`` (referenced
by ``_LazyModule``) undefined; and the lazy module object was assigned to a
local name instead of being installed into ``sys.modules``.
"""
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable

# Mapping of submodule name -> public names it exports; consumed by _LazyModule.
_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: simply do not advertise the modeling classes.
    pass
else:
    # Extend (do not replace) the structure with the torch-only exports.
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
705
"""Convert original YOLOS (timm-style ViT detector) checkpoints to the
Hugging Face ``YolosForObjectDetection`` format and optionally push to the hub.

NOTE(review): name obfuscation bound most values to ``lowercase__`` / parameters
to ``lowerCamelCase__`` while bodies read ``yolos_name``, ``config``,
``state_dict`` etc.; those reads have no visible binding here — verify against
the upstream script before relying on exact dataflow.
"""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging

logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)


def __lowerCamelCase ( lowerCamelCase__ ):
    """simple docstring"""
    # Build a YolosConfig whose architecture hyper-parameters match the named variant.
    lowercase__ : List[str] = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        lowercase__ : Tuple = 192
        lowercase__ : List[Any] = 768
        lowercase__ : Tuple = 12
        lowercase__ : List[str] = 3
        lowercase__ : List[Any] = [800, 1_333]
        lowercase__ : Union[str, Any] = False
    elif yolos_name == "yolos_s_dWr":
        lowercase__ : str = 330
        lowercase__ : List[Any] = 14
        lowercase__ : Tuple = 6
        lowercase__ : Optional[int] = 1_320
    elif "yolos_s" in yolos_name:
        lowercase__ : Dict = 384
        lowercase__ : str = 1_536
        lowercase__ : List[Any] = 12
        lowercase__ : List[Any] = 6
    elif "yolos_b" in yolos_name:
        lowercase__ : int = [800, 1_344]

    # 91 COCO detection classes; id<->label maps are fetched from the hub.
    lowercase__ : Tuple = 91
    lowercase__ : Optional[int] = "huggingface/label-files"
    lowercase__ : Optional[int] = "coco-detection-id2label.json"
    lowercase__ : Any = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
    lowercase__ : Optional[int] = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
    lowercase__ : List[Any] = idalabel
    lowercase__ : Optional[Any] = {v: k for k, v in idalabel.items()}

    return config


def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
    """simple docstring"""
    # Split each fused timm qkv projection into separate query/key/value tensors.
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        lowercase__ : Any = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
        lowercase__ : Any = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        lowercase__ : Union[str, Any] = in_proj_weight[: config.hidden_size, :]
        lowercase__ : Union[str, Any] = in_proj_bias[: config.hidden_size]
        lowercase__ : Dict = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        lowercase__ : Any = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        lowercase__ : str = in_proj_weight[-config.hidden_size :, :]
        lowercase__ : Tuple = in_proj_bias[-config.hidden_size :]


def __lowerCamelCase ( lowerCamelCase__ ):
    """simple docstring"""
    # Translate an original checkpoint key into the HF YOLOS naming scheme.
    if "backbone" in name:
        lowercase__ : Union[str, Any] = name.replace("backbone" , "vit" )
    if "cls_token" in name:
        lowercase__ : List[str] = name.replace("cls_token" , "embeddings.cls_token" )
    if "det_token" in name:
        lowercase__ : List[str] = name.replace("det_token" , "embeddings.detection_tokens" )
    if "mid_pos_embed" in name:
        lowercase__ : List[Any] = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
    if "pos_embed" in name:
        lowercase__ : Dict = name.replace("pos_embed" , "embeddings.position_embeddings" )
    if "patch_embed.proj" in name:
        lowercase__ : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    if "blocks" in name:
        lowercase__ : int = name.replace("blocks" , "encoder.layer" )
    if "attn.proj" in name:
        lowercase__ : Optional[Any] = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name:
        lowercase__ : Optional[int] = name.replace("attn" , "attention.self" )
    if "norm1" in name:
        lowercase__ : int = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        lowercase__ : int = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        lowercase__ : List[str] = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        lowercase__ : Union[str, Any] = name.replace("mlp.fc2" , "output.dense" )
    if "class_embed" in name:
        lowercase__ : int = name.replace("class_embed" , "class_labels_classifier" )
    if "bbox_embed" in name:
        lowercase__ : Optional[int] = name.replace("bbox_embed" , "bbox_predictor" )
    if "vit.norm" in name:
        lowercase__ : Optional[Any] = name.replace("vit.norm" , "vit.layernorm" )

    return name


def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
    """simple docstring"""
    # Rewrite every key of the original state dict; fused qkv tensors are split
    # per head-size boundaries, everything else is renamed in place.
    for key in orig_state_dict.copy().keys():
        lowercase__ : List[Any] = orig_state_dict.pop(lowerCamelCase__ )
        if "qkv" in key:
            lowercase__ : Dict = key.split("." )
            lowercase__ : List[Any] = int(key_split[2] )
            lowercase__ : Optional[int] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                lowercase__ : str = val[:dim, :]
                lowercase__ : int = val[
                    dim : dim * 2, :
                ]
                lowercase__ : str = val[-dim:, :]
            else:
                lowercase__ : Tuple = val[:dim]
                lowercase__ : Any = val[dim : dim * 2]
                lowercase__ : Optional[Any] = val[-dim:]
        else:
            lowercase__ : Optional[Any] = val

    return orig_state_dict


def __lowerCamelCase ( ):
    """simple docstring"""
    # Standard COCO sample image used to verify conversions end-to-end.
    lowercase__ : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg"
    lowercase__ : List[str] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
    return im


@torch.no_grad()
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
    """simple docstring"""
    # End-to-end conversion: build config, remap weights, check logits/boxes
    # against per-variant reference values, save, and optionally push to the hub.
    lowercase__ : List[Any] = get_yolos_config(lowerCamelCase__ )

    # load original state_dict
    lowercase__ : Dict = torch.load(lowerCamelCase__ , map_location="cpu" )["model"]

    # load 🤗 model
    lowercase__ : Dict = YolosForObjectDetection(lowerCamelCase__ )
    model.eval()
    lowercase__ : int = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
    model.load_state_dict(lowerCamelCase__ )

    # Check outputs on an image, prepared by YolosImageProcessor
    lowercase__ : Dict = 800 if yolos_name != "yolos_ti" else 512
    lowercase__ : Optional[Any] = YolosImageProcessor(format="coco_detection" , size=lowerCamelCase__ )
    lowercase__ : int = image_processor(images=prepare_img() , return_tensors="pt" )
    lowercase__ : int = model(**lowerCamelCase__ )
    lowercase__ , lowercase__ : int = outputs.logits, outputs.pred_boxes

    # Per-variant reference slices recorded from the original implementation.
    lowercase__ , lowercase__ : int = None, None
    if yolos_name == "yolos_ti":
        lowercase__ : Optional[int] = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
        lowercase__ : Dict = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
    elif yolos_name == "yolos_s_200_pre":
        lowercase__ : Any = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
        lowercase__ : List[str] = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
    elif yolos_name == "yolos_s_300_pre":
        lowercase__ : Dict = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
        lowercase__ : Tuple = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
    elif yolos_name == "yolos_s_dWr":
        lowercase__ : Optional[Any] = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
        lowercase__ : int = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
    elif yolos_name == "yolos_base":
        lowercase__ : List[str] = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
        lowercase__ : List[str] = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
    else:
        raise ValueError(F"""Unknown yolos_name: {yolos_name}""" )

    assert torch.allclose(logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )

    Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
    print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(lowerCamelCase__ )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(lowerCamelCase__ )

    if push_to_hub:
        # Map the internal variant name to its hub repository name.
        lowercase__ : Tuple = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub..." )
        lowercase__ : Optional[int] = model_mapping[yolos_name]
        image_processor.push_to_hub(lowerCamelCase__ , organization="hustvl" )
        model.push_to_hub(lowerCamelCase__ , organization="hustvl" )


if __name__ == "__main__":
    lowerCAmelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--yolos_name''',
        default='''yolos_s_200_pre''',
        type=str,
        help=(
            '''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
            ''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
        ),
    )
    parser.add_argument(
        '''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
    )

    lowerCAmelCase__ = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
81
0
"""Tests for the Stable Diffusion XL image-to-image pipeline: fast unit tests
with tiny dummy components, plus a slow GPU integration check.

NOTE(review): name obfuscation bound values to ``lowercase__`` and arguments to
``a_`` while bodies read ``sd_pipe``, ``inputs``, ``image_slice`` etc.; those
reads have no visible binding here — verify against the upstream test file.
"""
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImgaImgPipeline,
    UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin

enable_full_determinism()


class snake_case__(__a , __a , unittest.TestCase ):
    """simple docstring"""

    # Pipeline class under test and the parameter sets the common mixins exercise.
    lowercase_ = StableDiffusionXLImgaImgPipeline
    lowercase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    lowercase_ = PipelineTesterMixin.required_optional_params - {"latents"}
    lowercase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
    lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def snake_case ( self : Optional[Any] ):
        # Build minimal (tiny) versions of every SDXL component so tests run on CPU.
        torch.manual_seed(0 )
        lowercase__ : Any = UNetaDConditionModel(
            block_out_channels=(32, 64) ,
            layers_per_block=2 ,
            sample_size=32 ,
            in_channels=4 ,
            out_channels=4 ,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,
            attention_head_dim=(2, 4) ,
            use_linear_projection=a_ ,
            addition_embed_type="text_time" ,
            addition_time_embed_dim=8 ,
            transformer_layers_per_block=(1, 2) ,
            projection_class_embeddings_input_dim=80 ,  # 6 * 8 + 32
            cross_attention_dim=64 ,
        )
        lowercase__ : List[str] = EulerDiscreteScheduler(
            beta_start=0.00_085 ,
            beta_end=0.012 ,
            steps_offset=1 ,
            beta_schedule="scaled_linear" ,
            timestep_spacing="leading" ,
        )
        torch.manual_seed(0 )
        lowercase__ : Optional[Any] = AutoencoderKL(
            block_out_channels=[32, 64] ,
            in_channels=3 ,
            out_channels=3 ,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,
            latent_channels=4 ,
            sample_size=128 ,
        )
        torch.manual_seed(0 )
        lowercase__ : Union[str, Any] = CLIPTextConfig(
            bos_token_id=0 ,
            eos_token_id=2 ,
            hidden_size=32 ,
            intermediate_size=37 ,
            layer_norm_eps=1E-0_5 ,
            num_attention_heads=4 ,
            num_hidden_layers=5 ,
            pad_token_id=1 ,
            vocab_size=1_000 ,
            hidden_act="gelu" ,
            projection_dim=32 ,
        )
        # SDXL uses two text encoders/tokenizers; the second one is the projection variant.
        lowercase__ : Dict = CLIPTextModel(a_ )
        lowercase__ : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=a_ )
        lowercase__ : Tuple = CLIPTextModelWithProjection(a_ )
        lowercase__ : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=a_ )
        lowercase__ : Union[str, Any] = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_a,
            "tokenizer_2": tokenizer_a,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Dict=0 ):
        # Deterministic img2img inputs: a fixed random image plus a seeded generator.
        lowercase__ : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
        lowercase__ : Optional[int] = image / 2 + 0.5
        if str(a_ ).startswith("mps" ):
            lowercase__ : Optional[int] = torch.manual_seed(a_ )
        else:
            lowercase__ : str = torch.Generator(device=a_ ).manual_seed(a_ )
        lowercase__ : str = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs

    def snake_case ( self : int ):
        # Golden-slice regression test with the tiny components on CPU.
        lowercase__ : Tuple = "cpu"  # ensure determinism for the device-dependent torch.Generator
        lowercase__ : Tuple = self.get_dummy_components()
        lowercase__ : List[Any] = StableDiffusionXLImgaImgPipeline(**a_ )
        lowercase__ : Optional[Any] = sd_pipe.to(a_ )
        sd_pipe.set_progress_bar_config(disable=a_ )

        lowercase__ : str = self.get_dummy_inputs(a_ )
        lowercase__ : List[Any] = sd_pipe(**a_ ).images
        lowercase__ : int = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        lowercase__ : List[Any] = np.array([0.4_656, 0.4_840, 0.4_439, 0.6_698, 0.5_574, 0.4_524, 0.5_799, 0.5_943, 0.5_165] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def snake_case ( self : List[str] ):
        super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )

    def snake_case ( self : Union[str, Any] ):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )

    def snake_case ( self : str ):
        # Intentionally skipped mixin hook.
        pass

    def snake_case ( self : Dict ):
        # Check that passing pre-computed prompt embeddings matches passing raw prompts.
        lowercase__ : Optional[Any] = self.get_dummy_components()
        lowercase__ : Tuple = StableDiffusionXLImgaImgPipeline(**a_ )
        lowercase__ : Union[str, Any] = sd_pipe.to(a_ )
        lowercase__ : int = sd_pipe.to(a_ )
        sd_pipe.set_progress_bar_config(disable=a_ )

        # forward without prompt embeds
        lowercase__ : str = self.get_dummy_inputs(a_ )
        lowercase__ : Any = 3 * ["this is a negative prompt"]
        lowercase__ : Optional[Any] = negative_prompt
        lowercase__ : Optional[Any] = 3 * [inputs["prompt"]]

        lowercase__ : Any = sd_pipe(**a_ )
        lowercase__ : List[str] = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        lowercase__ : Optional[int] = self.get_dummy_inputs(a_ )
        lowercase__ : Any = 3 * ["this is a negative prompt"]
        lowercase__ : List[str] = 3 * [inputs.pop("prompt" )]

        (
            lowercase__
        ) : str = sd_pipe.encode_prompt(a_ , negative_prompt=a_ )

        lowercase__ : Any = sd_pipe(
            **a_ ,
            prompt_embeds=a_ ,
            negative_prompt_embeds=a_ ,
            pooled_prompt_embeds=a_ ,
            negative_pooled_prompt_embeds=a_ ,
        )
        lowercase__ : Optional[Any] = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4


@slow
@require_torch_gpu
class snake_case__(unittest.TestCase ):
    """simple docstring"""

    def snake_case ( self : List[str] ):
        # Free GPU memory between slow tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Dict="cpu" , SCREAMING_SNAKE_CASE : str=torch.floataa , SCREAMING_SNAKE_CASE : str=0 ):
        # Deterministic latents so the slow test is reproducible across runs.
        lowercase__ : str = torch.Generator(device=a_ ).manual_seed(a_ )
        lowercase__ : Any = np.random.RandomState(a_ ).standard_normal((1, 4, 64, 64) )
        lowercase__ : str = torch.from_numpy(a_ ).to(device=a_ , dtype=a_ )
        lowercase__ : int = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def snake_case ( self : Union[str, Any] ):
        # Golden-slice check against a real pretrained checkpoint on GPU.
        lowercase__ : str = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base" )
        pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )

        lowercase__ : int = self.get_inputs(a_ )
        lowercase__ : Dict = pipe(**a_ ).images
        lowercase__ : Optional[int] = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        lowercase__ : Optional[int] = np.array([0.49_493, 0.47_896, 0.40_798, 0.54_214, 0.53_212, 0.48_202, 0.47_656, 0.46_329, 0.48_506] )
        assert np.abs(image_slice - expected_slice ).max() < 7E-3
706
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lazy import structure for MGP-STR: configuration, processor, tokenizer, and —
when torch is available — the PyTorch modeling classes.

Fixes: the import-structure dict was bound to a throwaway name and then
clobbered by the modeling-name list, leaving ``_import_structure`` (referenced
by ``_LazyModule``) undefined; and the lazy module object was assigned to a
local name instead of being installed into ``sys.modules``.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Mapping of submodule name -> public names it exports; consumed by _LazyModule.
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: simply do not advertise the modeling classes.
    pass
else:
    # Extend (do not replace) the structure with the torch-only exports.
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
81
0
"""Tests for GLPNImageProcessor: a tester class holding image-generation
parameters, and the test suite checking property presence and that processed
PIL / numpy / torch inputs all come out with dimensions divisible by
``size_divisor``.

NOTE(review): name obfuscation bound values to ``lowercase__`` while bodies read
``image_processing``, ``encoded_images`` etc.; those reads have no visible
binding here — verify against the upstream test file.
"""
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs

if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import GLPNImageProcessor


class snake_case__(unittest.TestCase ):
    """simple docstring"""

    def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any]=7 , SCREAMING_SNAKE_CASE : List[str]=3 , SCREAMING_SNAKE_CASE : Tuple=18 , SCREAMING_SNAKE_CASE : str=30 , SCREAMING_SNAKE_CASE : Tuple=400 , SCREAMING_SNAKE_CASE : Tuple=True , SCREAMING_SNAKE_CASE : Any=32 , SCREAMING_SNAKE_CASE : Optional[int]=True , ):
        # Store the parameters used to fabricate random test images and to build
        # the image-processor config dict below.
        lowercase__ : Optional[int] = parent
        lowercase__ : Union[str, Any] = batch_size
        lowercase__ : Optional[int] = num_channels
        lowercase__ : Union[str, Any] = image_size
        lowercase__ : Union[str, Any] = min_resolution
        lowercase__ : List[Any] = max_resolution
        lowercase__ : Tuple = do_resize
        lowercase__ : Tuple = size_divisor
        lowercase__ : Tuple = do_rescale

    def snake_case ( self : Dict ):
        # Config dict consumed by GLPNImageProcessor(**...).
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class snake_case__(_UpperCamelCase , unittest.TestCase ):
    """simple docstring"""

    # Processor class under test (None when PIL is unavailable).
    lowercase_ = GLPNImageProcessor if is_vision_available() else None

    def snake_case ( self : int ):
        lowercase__ : str = GLPNImageProcessingTester(self )

    @property
    def snake_case ( self : Optional[Any] ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def snake_case ( self : Optional[Any] ):
        # The processor must expose all documented configuration attributes.
        lowercase__ : int = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowerCAmelCase , "do_resize" ) )
        self.assertTrue(hasattr(__lowerCAmelCase , "size_divisor" ) )
        self.assertTrue(hasattr(__lowerCAmelCase , "resample" ) )
        self.assertTrue(hasattr(__lowerCAmelCase , "do_rescale" ) )

    def snake_case ( self : List[str] ):
        # Intentionally skipped mixin hook.
        pass

    def snake_case ( self : List[Any] ):
        # PIL input: output height/width must be divisible by size_divisor.
        lowercase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowercase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(__lowerCAmelCase , Image.Image )

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        lowercase__ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )

    def snake_case ( self : List[str] ):
        # numpy input: same divisibility guarantee.
        lowercase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowercase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(__lowerCAmelCase , np.ndarray )

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        lowercase__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )

    def snake_case ( self : str ):
        # torch input: same divisibility guarantee.
        lowercase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowercase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(__lowerCAmelCase , torch.Tensor )

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
707
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import CLIPImageProcessor, CLIPProcessor


@require_vision
class snake_case__(unittest.TestCase ):
    """Round-trip tests for CLIPProcessor (tokenizer + image processor pairing).

    NOTE(review): obfuscation artifacts — all test methods share the name
    ``snake_case`` so later definitions shadow earlier ones, and locals are
    bound to ``lowercase__`` while later statements read the intended names.
    Code is kept byte-identical; only comments were added.
    """

    def snake_case ( self : Optional[Any] ):
        # setUp: write a throw-away BPE vocab/merges and an image-processor
        # JSON config into a temp dir so from_pretrained() can load them.
        lowercase__ : Dict = tempfile.mkdtemp()
        # fmt: off
        lowercase__ : Any = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        lowercase__ : Dict = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
        lowercase__ : Tuple = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        lowercase__ : Tuple = {"unk_token": "<unk>"}
        lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(SCREAMING_SNAKE_CASE ) )
        lowercase__ : Tuple = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
            "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
        }
        lowercase__ : Optional[Any] = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
        # Slow (pure-Python) tokenizer loaded from the temp dir.
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )

    def snake_case ( self : Optional[int] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
        # Fast (Rust-backed) tokenizer loaded from the same temp dir.
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )

    def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Dict ):
        # Image processor loaded from the JSON config written in setUp.
        return CLIPImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )

    def snake_case ( self : Union[str, Any] ):
        # tearDown: remove the temp dir created in setUp.
        shutil.rmtree(self.tmpdirname )

    def snake_case ( self : Any ):
        # Build one random 30x400 RGB PIL image as test input.
        lowercase__ : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        lowercase__ : str = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def snake_case ( self : int ):
        # save_pretrained / from_pretrained round-trip with both slow and fast tokenizers.
        lowercase__ : Optional[int] = self.get_tokenizer()
        lowercase__ : List[Any] = self.get_rust_tokenizer()
        lowercase__ : List[str] = self.get_image_processor()
        lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
        processor_slow.save_pretrained(self.tmpdirname )
        lowercase__ : Dict = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE )
        lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
        processor_fast.save_pretrained(self.tmpdirname )
        lowercase__ : Tuple = CLIPProcessor.from_pretrained(self.tmpdirname )
        # Vocabularies must survive the round-trip and agree between slow/fast.
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE )
        self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE )
        self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE )

    def snake_case ( self : List[str] ):
        # from_pretrained with overriding kwargs (new special tokens, image-proc opts).
        lowercase__ : Any = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        lowercase__ : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        lowercase__ : int = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
        lowercase__ : Union[str, Any] = CLIPProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE )

    def snake_case ( self : str ):
        # Processor(images=...) must match the bare image processor output.
        lowercase__ : int = self.get_image_processor()
        lowercase__ : Optional[Any] = self.get_tokenizer()
        lowercase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
        lowercase__ : Any = self.prepare_image_inputs()
        lowercase__ : List[Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="np" )
        lowercase__ : Optional[int] = processor(images=SCREAMING_SNAKE_CASE , return_tensors="np" )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def snake_case ( self : str ):
        # Processor(text=...) must match the bare tokenizer output.
        lowercase__ : Tuple = self.get_image_processor()
        lowercase__ : Any = self.get_tokenizer()
        lowercase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
        lowercase__ : int = "lower newer"
        lowercase__ : Dict = processor(text=SCREAMING_SNAKE_CASE )
        lowercase__ : int = tokenizer(SCREAMING_SNAKE_CASE )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def snake_case ( self : Union[str, Any] ):
        # Combined text+image call yields the union of keys; empty call raises.
        lowercase__ : Optional[int] = self.get_image_processor()
        lowercase__ : Tuple = self.get_tokenizer()
        lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
        lowercase__ : List[Any] = "lower newer"
        lowercase__ : str = self.prepare_image_inputs()
        lowercase__ : int = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
        # test if it raises when no input is passed
        with pytest.raises(SCREAMING_SNAKE_CASE ):
            processor()

    def snake_case ( self : Optional[Any] ):
        # batch_decode must delegate to the tokenizer's batch_decode.
        lowercase__ : Dict = self.get_image_processor()
        lowercase__ : Optional[Any] = self.get_tokenizer()
        lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
        lowercase__ : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        lowercase__ : Any = processor.batch_decode(SCREAMING_SNAKE_CASE )
        lowercase__ : Any = tokenizer.batch_decode(SCREAMING_SNAKE_CASE )
        self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    def snake_case ( self : str ):
        # Output keys must match the processor's declared model_input_names.
        lowercase__ : List[str] = self.get_image_processor()
        lowercase__ : List[str] = self.get_tokenizer()
        lowercase__ : Union[str, Any] = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
        lowercase__ : Any = "lower newer"
        lowercase__ : Union[str, Any] = self.prepare_image_inputs()
        lowercase__ : int = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
81
0
import unittest from accelerate import debug_launcher from accelerate.test_utils import require_cpu, test_ops, test_script @require_cpu class snake_case__(unittest.TestCase ): """simple docstring""" def snake_case ( self : str ): debug_launcher(test_script.main ) def snake_case ( self : Union[str, Any] ): debug_launcher(test_ops.main )
708
import unittest
from queue import Empty
from threading import Thread

from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM


@require_torch
class snake_case__(unittest.TestCase ):
    """Tests for TextStreamer / TextIteratorStreamer generation streaming.

    NOTE(review): obfuscation artifacts — every method is named ``snake_case``
    (later defs shadow earlier ones) and locals are bound to ``lowercase__``
    while later statements read the intended names (``model``, ``input_ids``,
    ``streamer``…). Code kept byte-identical; only comments were added.
    """

    def snake_case ( self : int ):
        # TextStreamer prints exactly the greedy decode to stdout.
        lowercase__ : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        lowercase__ : Dict = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : str = -1
        lowercase__ : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
        lowercase__ : Dict = tokenizer.decode(greedy_ids[0] )
        with CaptureStdout() as cs:
            lowercase__ : str = TextStreamer(SCREAMING_SNAKE_CASE )
            model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        lowercase__ : int = cs.out[:-1]
        self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    def snake_case ( self : Optional[int] ):
        # TextIteratorStreamer yields the same text when generate runs in a thread.
        lowercase__ : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        lowercase__ : str = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[Any] = -1
        lowercase__ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[int] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
        lowercase__ : int = tokenizer.decode(greedy_ids[0] )
        lowercase__ : Union[str, Any] = TextIteratorStreamer(SCREAMING_SNAKE_CASE )
        lowercase__ : Dict = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        lowercase__ : Optional[int] = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE )
        thread.start()
        lowercase__ : List[Any] = ""
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    def snake_case ( self : Union[str, Any] ):
        # skip_prompt=True must drop the prompt tokens from the streamed text.
        lowercase__ : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        lowercase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : Union[str, Any] = -1
        lowercase__ : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
        lowercase__ : Any = greedy_ids[:, input_ids.shape[1] :]
        lowercase__ : Any = tokenizer.decode(new_greedy_ids[0] )
        with CaptureStdout() as cs:
            lowercase__ : str = TextStreamer(SCREAMING_SNAKE_CASE , skip_prompt=SCREAMING_SNAKE_CASE )
            model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        lowercase__ : Optional[Any] = cs.out[:-1]
        self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    def snake_case ( self : Any ):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        lowercase__ : List[str] = AutoTokenizer.from_pretrained("distilgpt2" )
        lowercase__ : Tuple = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : List[Any] = -1
        lowercase__ : List[Any] = torch.ones((1, 5) , device=SCREAMING_SNAKE_CASE ).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            lowercase__ : Dict = TextStreamer(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
            model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=1 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        lowercase__ : List[Any] = cs.out[:-1]  # Remove the final "\n"
        lowercase__ : Optional[int] = tokenizer(SCREAMING_SNAKE_CASE , return_tensors="pt" )
        self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )

    def snake_case ( self : Optional[int] ):
        # A very short timeout must raise (queue.Empty) while iterating the streamer.
        lowercase__ : Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        lowercase__ : List[str] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : int = -1
        lowercase__ : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
        lowercase__ : List[Any] = TextIteratorStreamer(SCREAMING_SNAKE_CASE , timeout=0.001 )
        lowercase__ : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        lowercase__ : Any = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE )
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(SCREAMING_SNAKE_CASE ):
            lowercase__ : List[str] = ""
            for new_text in streamer:
                streamer_text += new_text
81
0
from __future__ import annotations import math from collections.abc import Callable def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 100 , ): """simple docstring""" lowercase__ : str = x_start lowercase__ : Optional[Any] = fnc(_lowercase ) lowercase__ : Union[str, Any] = 0.0 for _ in range(_lowercase ): # Approximates curve as a sequence of linear lines and sums their length lowercase__ : Optional[Any] = (x_end - x_start) / steps + xa lowercase__ : str = fnc(_lowercase ) length += math.hypot(xa - xa , fxa - fxa ) # Increment step lowercase__ : Tuple = xa lowercase__ : Optional[Any] = fxa return length if __name__ == "__main__": def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" return math.sin(10 * x ) print('''f(x) = sin(10 * x)''') print('''The length of the curve from x = -10 to x = 10 is:''') lowerCAmelCase__ = 1_0 while i <= 1_0_0_0_0_0: print(f'''With {i} steps: {line_length(f, -1_0, 1_0, i)}''') i *= 1_0
709
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block


# NOTE(review): this module is heavily obfuscated — all four classes are named
# ``snake_case__`` (later definitions shadow earlier ones), multi-parameter
# ``__init__``s repeat the name ``SCREAMING_SNAKE_CASE`` (a SyntaxError), and
# locals are bound to ``lowercase__`` while later statements read the intended
# attribute names. Code is kept byte-identical; only comments were added.
@dataclass
class snake_case__(_UpperCamelCase ):
    """Output container for the decoder — presumably holds the decoded sample
    tensor (the field name was lost to obfuscation; verify against upstream)."""
    lowercase_ = 42


class snake_case__(nn.Module ):
    """Convolutional encoder: conv_in -> down blocks -> mid block -> norm/act/conv_out."""

    def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : Optional[int]=3 , SCREAMING_SNAKE_CASE : List[Any]=("DownEncoderBlock2D",) , SCREAMING_SNAKE_CASE : Dict=(64,) , SCREAMING_SNAKE_CASE : Optional[Any]=2 , SCREAMING_SNAKE_CASE : Optional[int]=32 , SCREAMING_SNAKE_CASE : List[str]="silu" , SCREAMING_SNAKE_CASE : str=True , ):
        super().__init__()
        lowercase__ : str = layers_per_block
        # Stem conv mapping input channels to the first block width.
        lowercase__ : int = torch.nn.Convad( SCREAMING_SNAKE_CASE , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
        lowercase__ : Union[str, Any] = None
        lowercase__ : Optional[int] = nn.ModuleList([] )
        # down
        lowercase__ : Dict = block_out_channels[0]
        for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE ):
            lowercase__ : List[str] = output_channel
            lowercase__ : Dict = block_out_channels[i]
            lowercase__ : List[str] = i == len(SCREAMING_SNAKE_CASE ) - 1
            lowercase__ : Union[str, Any] = get_down_block( SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block , in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=SCREAMING_SNAKE_CASE , resnet_groups=SCREAMING_SNAKE_CASE , attention_head_dim=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , )
            self.down_blocks.append(SCREAMING_SNAKE_CASE )
        # mid
        lowercase__ : Optional[int] = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , )
        # out
        lowercase__ : int = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=SCREAMING_SNAKE_CASE , eps=1E-6 )
        lowercase__ : Union[str, Any] = nn.SiLU()
        # double_z doubles the output channels (mean + logvar of a Gaussian latent).
        lowercase__ : Tuple = 2 * out_channels if double_z else out_channels
        lowercase__ : Tuple = nn.Convad(block_out_channels[-1] , SCREAMING_SNAKE_CASE , 3 , padding=1 )
        lowercase__ : Tuple = False

    def snake_case ( self : int , SCREAMING_SNAKE_CASE : Tuple ):
        # Forward pass; uses gradient checkpointing during training when enabled.
        lowercase__ : List[str] = x
        lowercase__ : Tuple = self.conv_in(SCREAMING_SNAKE_CASE )
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(SCREAMING_SNAKE_CASE : Union[str, Any] ):
                # Wrap a module so torch.utils.checkpoint can re-run it on backward.
                def custom_forward(*SCREAMING_SNAKE_CASE : Dict ):
                    return module(*SCREAMING_SNAKE_CASE )

                return custom_forward

            # down
            if is_torch_version(">=" , "1.11.0" ):
                for down_block in self.down_blocks:
                    lowercase__ : Union[str, Any] = torch.utils.checkpoint.checkpoint( create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
                # middle
                lowercase__ : int = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
            else:
                for down_block in self.down_blocks:
                    lowercase__ : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
                # middle
                lowercase__ : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE )
        else:
            # down
            for down_block in self.down_blocks:
                lowercase__ : Any = down_block(SCREAMING_SNAKE_CASE )
            # middle
            lowercase__ : List[str] = self.mid_block(SCREAMING_SNAKE_CASE )
        # post-process
        lowercase__ : Union[str, Any] = self.conv_norm_out(SCREAMING_SNAKE_CASE )
        lowercase__ : List[Any] = self.conv_act(SCREAMING_SNAKE_CASE )
        lowercase__ : Any = self.conv_out(SCREAMING_SNAKE_CASE )
        return sample


class snake_case__(nn.Module ):
    """Convolutional decoder: conv_in -> mid block -> up blocks -> norm/act/conv_out.
    Supports a spatial norm conditioned on latent embeddings (norm_type="spatial")."""

    def __init__( self : Dict , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : Optional[int]=("UpDecoderBlock2D",) , SCREAMING_SNAKE_CASE : int=(64,) , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : int=32 , SCREAMING_SNAKE_CASE : str="silu" , SCREAMING_SNAKE_CASE : Any="group" , ):
        super().__init__()
        lowercase__ : List[str] = layers_per_block
        lowercase__ : int = nn.Convad( SCREAMING_SNAKE_CASE , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
        lowercase__ : Optional[Any] = None
        lowercase__ : Dict = nn.ModuleList([] )
        # Spatial norm needs the latent-embedding channel count; group norm does not.
        lowercase__ : List[str] = in_channels if norm_type == "spatial" else None
        # mid
        lowercase__ : str = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , )
        # up
        lowercase__ : Tuple = list(reversed(SCREAMING_SNAKE_CASE ) )
        lowercase__ : Dict = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE ):
            lowercase__ : Tuple = output_channel
            lowercase__ : List[Any] = reversed_block_out_channels[i]
            lowercase__ : List[Any] = i == len(SCREAMING_SNAKE_CASE ) - 1
            lowercase__ : Dict = get_up_block( SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , prev_output_channel=SCREAMING_SNAKE_CASE , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , resnet_groups=SCREAMING_SNAKE_CASE , attention_head_dim=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , resnet_time_scale_shift=SCREAMING_SNAKE_CASE , )
            self.up_blocks.append(SCREAMING_SNAKE_CASE )
            lowercase__ : Optional[Any] = output_channel
        # out
        if norm_type == "spatial":
            lowercase__ : Any = SpatialNorm(block_out_channels[0] , SCREAMING_SNAKE_CASE )
        else:
            lowercase__ : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=SCREAMING_SNAKE_CASE , eps=1E-6 )
        lowercase__ : Union[str, Any] = nn.SiLU()
        lowercase__ : Any = nn.Convad(block_out_channels[0] , SCREAMING_SNAKE_CASE , 3 , padding=1 )
        lowercase__ : List[Any] = False

    def snake_case ( self : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str=None ):
        # Forward pass; second argument is presumably the latent embeddings for
        # spatial norm (obfuscated) — verify against upstream.
        lowercase__ : Tuple = z
        lowercase__ : List[str] = self.conv_in(SCREAMING_SNAKE_CASE )
        lowercase__ : List[Any] = next(iter(self.up_blocks.parameters() ) ).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(SCREAMING_SNAKE_CASE : List[str] ):
                def custom_forward(*SCREAMING_SNAKE_CASE : Optional[int] ):
                    return module(*SCREAMING_SNAKE_CASE )

                return custom_forward

            if is_torch_version(">=" , "1.11.0" ):
                # middle
                lowercase__ : List[str] = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
                lowercase__ : str = sample.to(SCREAMING_SNAKE_CASE )
                # up
                for up_block in self.up_blocks:
                    lowercase__ : List[Any] = torch.utils.checkpoint.checkpoint( create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
            else:
                # middle
                lowercase__ : str = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
                lowercase__ : Tuple = sample.to(SCREAMING_SNAKE_CASE )
                # up
                for up_block in self.up_blocks:
                    lowercase__ : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        else:
            # middle
            lowercase__ : Optional[int] = self.mid_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
            lowercase__ : Optional[Any] = sample.to(SCREAMING_SNAKE_CASE )
            # up
            for up_block in self.up_blocks:
                lowercase__ : Optional[Any] = up_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        # post-process
        if latent_embeds is None:
            lowercase__ : Union[str, Any] = self.conv_norm_out(SCREAMING_SNAKE_CASE )
        else:
            lowercase__ : Dict = self.conv_norm_out(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        lowercase__ : Union[str, Any] = self.conv_act(SCREAMING_SNAKE_CASE )
        lowercase__ : Tuple = self.conv_out(SCREAMING_SNAKE_CASE )
        return sample


class snake_case__(nn.Module ):
    """VQ layer: maps continuous latents to nearest codebook entries, with an
    optional index remap for pruned codebooks and a commitment loss."""

    def __init__( self : Any , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : List[Any]="random" , SCREAMING_SNAKE_CASE : Union[str, Any]=False , SCREAMING_SNAKE_CASE : int=True ):
        super().__init__()
        lowercase__ : List[Any] = n_e
        lowercase__ : List[str] = vq_embed_dim
        lowercase__ : Optional[Any] = beta
        lowercase__ : List[str] = legacy
        # Codebook, uniformly initialised in [-1/n_e, 1/n_e].
        lowercase__ : Tuple = nn.Embedding(self.n_e , self.vq_embed_dim )
        self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
        lowercase__ : Union[str, Any] = remap
        if self.remap is not None:
            self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
            lowercase__ : Tuple = self.used.shape[0]
            lowercase__ : Any = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                # "extra" reserves one additional index for unknown codes.
                lowercase__ : Any = self.re_embed
                lowercase__ : Tuple = self.re_embed + 1
            print( f"""Remapping {self.n_e} indices to {self.re_embed} indices. 
 """ f"""Using {self.unknown_index} for unknown indices.""" )
        else:
            lowercase__ : str = n_e
        lowercase__ : Union[str, Any] = sane_index_shape

    def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict ):
        # remap_to_used: map raw codebook indices onto the pruned ("used") set;
        # unmatched indices are replaced per self.unknown_index policy.
        lowercase__ : Any = inds.shape
        assert len(SCREAMING_SNAKE_CASE ) > 1
        lowercase__ : List[str] = inds.reshape(ishape[0] , -1 )
        lowercase__ : str = self.used.to(SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[int] = (inds[:, :, None] == used[None, None, ...]).long()
        lowercase__ : Dict = match.argmax(-1 )
        lowercase__ : Dict = match.sum(2 ) < 1
        if self.unknown_index == "random":
            lowercase__ : Optional[Any] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
        else:
            lowercase__ : List[Any] = self.unknown_index
        return new.reshape(SCREAMING_SNAKE_CASE )

    def snake_case ( self : int , SCREAMING_SNAKE_CASE : int ):
        # unmap_to_all: inverse of remap_to_used — recover full-codebook indices.
        lowercase__ : List[Any] = inds.shape
        assert len(SCREAMING_SNAKE_CASE ) > 1
        lowercase__ : Optional[int] = inds.reshape(ishape[0] , -1 )
        lowercase__ : str = self.used.to(SCREAMING_SNAKE_CASE )
        if self.re_embed > self.used.shape[0]:  # extra token
            lowercase__ : int = 0  # simply set to zero
        lowercase__ : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , SCREAMING_SNAKE_CASE )
        return back.reshape(SCREAMING_SNAKE_CASE )

    def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : List[Any] ):
        # reshape z -> (batch, height, width, channel) and flatten
        lowercase__ : Union[str, Any] = z.permute(0 , 2 , 3 , 1 ).contiguous()
        lowercase__ : Optional[Any] = z.view(-1 , self.vq_embed_dim )
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        lowercase__ : Optional[Any] = torch.argmin(torch.cdist(SCREAMING_SNAKE_CASE , self.embedding.weight ) , dim=1 )
        lowercase__ : List[str] = self.embedding(SCREAMING_SNAKE_CASE ).view(z.shape )
        lowercase__ : Dict = None
        lowercase__ : int = None
        # compute loss for embedding
        if not self.legacy:
            lowercase__ : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
        else:
            lowercase__ : List[str] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
        # preserve gradients
        lowercase__ : Union[str, Any] = z + (z_q - z).detach()
        # reshape back to match original input shape
        lowercase__ : Optional[int] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        if self.remap is not None:
            lowercase__ : Dict = min_encoding_indices.reshape(z.shape[0] , -1 )  # add batch axis
            lowercase__ : int = self.remap_to_used(SCREAMING_SNAKE_CASE )
            lowercase__ : List[str] = min_encoding_indices.reshape(-1 , 1 )  # flatten
        if self.sane_index_shape:
            lowercase__ : List[str] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
        # get_codebook_entry: look up quantized vectors for given indices.
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            lowercase__ : Union[str, Any] = indices.reshape(shape[0] , -1 )  # add batch axis
            lowercase__ : Union[str, Any] = self.unmap_to_all(SCREAMING_SNAKE_CASE )
            lowercase__ : Optional[int] = indices.reshape(-1 )  # flatten again
        # get quantized latent vectors
        lowercase__ : List[Any] = self.embedding(SCREAMING_SNAKE_CASE )
        if shape is not None:
            lowercase__ : Any = z_q.view(SCREAMING_SNAKE_CASE )
            # reshape back to match original input shape
            lowercase__ : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        return z_q


class snake_case__(_UpperCamelCase ):
    """Diagonal Gaussian over latents, parameterised by channel-concatenated
    mean and log-variance; supports sampling, KL and NLL."""

    def __init__( self : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : str=False ):
        lowercase__ : Dict = parameters
        # Split channel dim into mean and log-variance halves.
        lowercase__ , lowercase__ : Optional[int] = torch.chunk(SCREAMING_SNAKE_CASE , 2 , dim=1 )
        # Clamp logvar for numerical stability of exp() below.
        lowercase__ : Optional[Any] = torch.clamp(self.logvar , -30.0 , 20.0 )
        lowercase__ : Optional[int] = deterministic
        lowercase__ : Tuple = torch.exp(0.5 * self.logvar )
        lowercase__ : Optional[int] = torch.exp(self.logvar )
        if self.deterministic:
            # Zero std collapses sampling onto the mean.
            lowercase__ : Any = torch.zeros_like( self.mean , device=self.parameters.device , dtype=self.parameters.dtype )

    def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None ):
        # make sure sample is on the same device as the parameters and has same dtype
        lowercase__ : Tuple = randn_tensor( self.mean.shape , generator=SCREAMING_SNAKE_CASE , device=self.parameters.device , dtype=self.parameters.dtype )
        lowercase__ : str = self.mean + self.std * sample
        return x

    def snake_case ( self : str , SCREAMING_SNAKE_CASE : List[str]=None ):
        # KL divergence to a standard normal (other=None) or to another
        # diagonal Gaussian, summed over the non-batch dims.
        if self.deterministic:
            return torch.Tensor([0.0] )
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
            else:
                return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , )

    def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict=[1, 2, 3] ):
        # Negative log-likelihood of `sample` under this Gaussian.
        # NOTE(review): mutable default list kept byte-identical (read-only here).
        if self.deterministic:
            return torch.Tensor([0.0] )
        lowercase__ : Any = np.log(2.0 * np.pi )
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=SCREAMING_SNAKE_CASE )

    def snake_case ( self : Tuple ):
        # Distribution mode == mean for a Gaussian.
        return self.mean
81
0
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX

# BUGFIX(review): the original file defined every function as
# ``__lowerCamelCase`` with the same parameter name repeated (a SyntaxError),
# while the ``__main__`` code called ``linear_regression_prediction`` /
# ``sarimax_predictor`` / ``support_vector_regressor`` / ``data_safety_checker``
# — names that were never defined. Functions restored under the names their
# call sites use; the final print was also missing its ``f`` prefix.


def linear_regression_prediction(train_dt, train_usr, train_mtch, test_dt, test_mtch):
    """OLS forecast of the next user count from date and match features.

    :param train_dt: training dates
    :param train_usr: training user counts (targets)
    :param train_mtch: training match counts
    :param test_dt: test date (single-element list)
    :param test_mtch: test match count (single-element list)
    :return: absolute predicted value for the test point
    """
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    # Normal-equation solve: beta = (X^T X)^-1 X^T y
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    # BUGFIX(review): the third coefficient must multiply its feature
    # (original had `test_mtch[0] + beta[2]`).
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2])


def sarimax_predictor(train_user, train_match, test_match):
    """One-step-ahead SARIMAX forecast with matches as an exogenous regressor."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)  # weekly seasonality
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train, x_test, train_user):
    """RBF-kernel SVR forecast of the next user count."""
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user):
    """Return a lower outlier limit derived from the IQR (sorts in place)."""
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote, actual_result):
    """Majority vote over the forecasts: True when most agree with the actual
    value within an absolute tolerance of 0.1."""
    safe = 0
    not_safe = 0
    for vote in list_vote:
        if vote > actual_result:
            # Over-forecast counts against safety.
            not_safe += 1
        elif abs(abs(vote) - abs(actual_result)) <= 0.1:
            safe += 1
        else:
            not_safe += 1
    return safe > not_safe


if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(data_input, columns=["total_user", "total_even", "days"])
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()
    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]
    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]
    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]
    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]
    # check the safety of today's data
    # BUGFIX(review): pass the scalar actual value, not the one-element list
    # (comparing float > list raises TypeError); also restored the f-string.
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    print(f"Today's data is {not_str}safe.")
710
import gc
import unittest

import numpy as np
import torch

from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
    CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU checks for DiTPipeline built from tiny dummy components."""

    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    # NOTE(review): the obfuscated source ends the attribute list with a bare
    # `False`; upstream this flag disables CPU-offload testing — confirm name.
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build a tiny transformer/vae/scheduler trio for fast tests."""
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,  # NOTE(review): value lost in obfuscation; upstream uses True
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,  # NOTE(review): value lost in obfuscation; upstream uses False
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        return {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic generator plus minimal call kwargs for the pipeline."""
        if str(device).startswith("mps"):
            # MPS does not support device-local generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

    def test_inference(self):
        """End-to-end dummy inference must reproduce a pinned image slice."""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU tests against the published facebook/DiT checkpoints."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)
        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        # Swap in the multistep DPM solver (the corrupted source built it but
        # never assigned it back to the pipeline).
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)
        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
81
0
from __future__ import annotations


def __lowerCamelCase(stress: float, tangential_force: float, area: float) -> tuple[str, float]:
    """Solve the shear-stress relation (stress = tangential_force / area).

    Exactly one of the three arguments must be 0; that unknown is computed
    from the other two and returned as ("name", value).

    Raises ValueError when not exactly one argument is 0 or any is negative.

    Note: the corrupted source declared all three parameters with the same
    name (a SyntaxError); the body already used these names.
    """
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif stress < 0:
        raise ValueError("Stress cannot be negative")
    elif tangential_force < 0:
        raise ValueError("Tangential Force cannot be negative")
    elif area < 0:
        raise ValueError("Area cannot be negative")
    elif stress == 0:
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            "tangential_force",
            stress * area,
        )
    else:
        return (
            "area",
            tangential_force / stress,
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
711
import torch

from diffusers import CMStochasticIterativeScheduler

from .test_schedulers import SchedulerCommonTest


class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    """Unit tests for the consistency-model stochastic iterative scheduler."""

    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Default scheduler config; keyword overrides are merged in."""
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config

    def test_step_shape(self):
        """step() output shape must match the input sample at any timestep."""
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)
        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        """Single-step denoising loop must reproduce pinned statistics."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_with_custom_timesteps(self):
        """Custom [106, 0] schedule must reproduce pinned statistics."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        """Non-descending custom timesteps must be rejected."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        """num_inference_steps and timesteps are mutually exclusive."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        """Timesteps must start below num_train_timesteps."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
81
0
import hashlib
import unittest
from typing import Dict

import numpy as np

from transformers import (
    MODEL_FOR_MASK_GENERATION_MAPPING,
    TF_MODEL_FOR_MASK_GENERATION_MAPPING,
    is_vision_available,
    pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)


if is_vision_available():
    from PIL import Image
else:

    class Image:
        """Stub so references to Image do not fail when vision extras are absent."""

        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image) -> str:
    """Return the first 10 hex chars of the MD5 of the image's raw bytes."""
    # The corrupted source called `hashlib.mda` (digit lost in obfuscation).
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask) -> Dict:
    """Summarize a mask as a short content hash plus its array shape."""
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}


@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    """Pipeline tests for automatic mask generation (SAM)."""

    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        # Covered by the slow integration tests below.
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.021},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
                {"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.9967},
                {"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.993},
                {"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.9909},
                {"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.9879},
                {"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.9834},
                {"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.9716},
                {"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.9612},
                {"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.9599},
                {"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.9552},
                {"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.9532},
                {"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.9516},
                {"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.9499},
                {"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.9483},
                {"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.9464},
                {"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.9408},
                {"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.9335},
                {"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.9326},
                {"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.9262},
                {"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.8999},
                {"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.8986},
                {"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.8984},
                {"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.8873},
                {"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.8871},
            ],
        )
        # fmt: on

    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0210},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
            ],
        )
712
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim

from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax
import jax.numpy as jnp

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
    CommonSchedulerState,
    FlaxKarrasDiffusionSchedulers,
    FlaxSchedulerMixin,
    FlaxSchedulerOutput,
    add_noise_common,
    get_velocity_common,
)


@flax.struct.dataclass
class DDPMSchedulerState:
    """Mutable scheduler state carried through the (functional) Flax API."""

    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Flax implementation of the DDPM denoising scheduler (Ho et al., 2020)."""

    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        """Build the initial scheduler state (betas/alphas live in `common`)."""
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )

    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        """DDPM needs no input scaling; returns the sample unchanged."""
        return sample

    def set_timesteps(
        self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> DDPMSchedulerState:
        """Return a new state with the discrete inference timesteps set."""
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )

    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        """Per-timestep posterior variance under the configured variance type."""
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from
        # https://arxiv.org/pdf/2006.11239.pdf) and sample from it to get
        # previous sample: x_{t-1} ~ N(pred_prev_sample, variance)
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            # interpolate between the min and max log-variance using the
            # network-predicted fraction in [-1, 1]
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        """One reverse-diffusion step: predict x_{t-1} from model output at t."""
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                " for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        # No noise is added at the final step (t == 0).
        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
81
0
"""Compute the arithmetic mean of a list of numbers."""

from __future__ import annotations


def __lowerCamelCase(nums: list[int | float]) -> float:
    """Return the arithmetic mean of *nums*.

    Raises ValueError for an empty list.

    Note: the corrupted source referenced an undefined name
    (`__UpperCamelCase`) instead of the parameter.
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
713
from typing import Callable, List, Optional, Union

import PIL
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPSegForImageSegmentation,
    CLIPSegProcessor,
    CLIPTextModel,
    CLIPTokenizer,
)

from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class TextInpainting(DiffusionPipeline):
    """Inpainting pipeline that derives the mask from a text prompt via CLIPSeg.

    CLIPSeg segments the region described by `text`; the resulting mask is fed
    to a standard Stable Diffusion inpainting pipeline.
    """

    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        # Patch legacy scheduler configs in place (with a deprecation notice).
        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        """Compute attention in `slice_size` chunks to save memory."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """Disable sliced attention (compute in one step)."""
        # set slice_size = `None` to disable slicing
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        """Offload submodules to CPU, moving them to GPU only when used."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Segment the region described by `text`, then inpaint it with `prompt`."""
        # 1. Derive the inpainting mask from the text via CLIPSeg.
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # 2. Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
81
0
# NOTE(review): machine-obfuscated copy of transformers'
# test_image_processing_layoutlmv3.py. Class names were collapsed to
# `snake_case__`, all test methods to `snake_case` (so later defs shadow
# earlier ones), and parameters to a single `SCREAMING_SNAKE_CASE`
# placeholder — several signatures therefore have duplicate argument names
# (SyntaxError) and bodies reference pre-obfuscation names (`size`,
# `parent`, `image_processor`, `encoded_images`, `ds`). Comments only;
# no code tokens changed.
import unittest

import numpy as np

from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMvaImageProcessor


class snake_case__(unittest.TestCase ):
    """Fixture holder: fabricates LayoutLMv3 image-processor settings for the tests below."""

    def __init__( self : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int]=7 , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : List[str]=18 , SCREAMING_SNAKE_CASE : Dict=30 , SCREAMING_SNAKE_CASE : int=400 , SCREAMING_SNAKE_CASE : int=True , SCREAMING_SNAKE_CASE : str=None , SCREAMING_SNAKE_CASE : Optional[Any]=True , ):
        # NOTE(review): original parameters were presumably
        # (parent, batch_size=7, num_channels=3, image_size=18,
        #  min_resolution=30, max_resolution=400, do_resize=True,
        #  size=None, apply_ocr=True) — confirm against upstream.
        lowercase__ : Optional[Any] = size if size is not None else {"""height""": 18, """width""": 18}
        lowercase__ : Optional[int] = parent
        lowercase__ : List[Any] = batch_size
        lowercase__ : Union[str, Any] = num_channels
        lowercase__ : Tuple = image_size
        lowercase__ : Optional[int] = min_resolution
        lowercase__ : int = max_resolution
        lowercase__ : Optional[Any] = do_resize
        lowercase__ : str = size
        lowercase__ : Any = apply_ocr

    def snake_case ( self : List[Any] ):
        # Kwargs dict used to instantiate the image processor under test.
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class snake_case__(UpperCAmelCase__ , unittest.TestCase ):
    """simple docstring"""
    # Processor class under test; None when pytesseract is missing.
    lowercase_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def snake_case ( self : str ):
        lowercase__ : Tuple = LayoutLMvaImageProcessingTester(self )

    @property
    def snake_case ( self : Tuple ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def snake_case ( self : Union[str, Any] ):
        # Expected attributes exist on the processor.
        lowercase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_resize" ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "size" ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "apply_ocr" ) )

    def snake_case ( self : Optional[int] ):
        # from_dict honours both the stored size and an override.
        lowercase__ : int = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
        lowercase__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )

    def snake_case ( self : Union[str, Any] ):
        pass

    def snake_case ( self : Tuple ):
        # PIL inputs: single image and batched shapes, plus OCR outputs.
        lowercase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowercase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE , Image.Image )
        # Test not batched input
        lowercase__ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" )
        self.assertEqual(
            encoding.pixel_values.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        self.assertIsInstance(encoding.words , SCREAMING_SNAKE_CASE )
        self.assertIsInstance(encoding.boxes , SCREAMING_SNAKE_CASE )
        # Test batched
        lowercase__ : int = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

    def snake_case ( self : Union[str, Any] ):
        # Numpy inputs: same shape checks as the PIL case.
        lowercase__ : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowercase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , numpify=SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE , np.ndarray )
        # Test not batched input
        lowercase__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        lowercase__ : List[str] = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

    def snake_case ( self : Optional[Any] ):
        # Torch-tensor inputs: same shape checks again.
        lowercase__ : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowercase__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , torchify=SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor )
        # Test not batched input
        lowercase__ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        lowercase__ : Any = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

    def snake_case ( self : int ):
        # Integration test: run real Tesseract OCR on a DocVQA fixture and
        # compare against pre-recorded words/boxes, then check apply_ocr=False.
        lowercase__ : str = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        lowercase__ : Any = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
        lowercase__ : Union[str, Any] = Image.open(ds[0]["file"] ).convert("RGB" )
        lowercase__ : Union[str, Any] = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        lowercase__ : List[str] = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]]  # noqa: E231
        lowercase__ : Dict = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]]  # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , SCREAMING_SNAKE_CASE )
        self.assertListEqual(encoding.boxes , SCREAMING_SNAKE_CASE )
        # with apply_OCR = False
        lowercase__ : Tuple = LayoutLMvaImageProcessor(apply_ocr=SCREAMING_SNAKE_CASE )
        lowercase__ : Tuple = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
714
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms

from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


def get_focalnet_config(model_name):
    """Build a FocalNetConfig for the given official checkpoint name.

    Args:
        model_name: checkpoint identifier, e.g. "focalnet-tiny-lrf" or
            "focalnet-large-lrf-fl4"; size/variant keywords in the name
            select depths, focal levels/windows and embedding dim.

    Returns:
        A populated ``FocalNetConfig`` (22k labels for large/huge models,
        1k labels otherwise).
    """
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    # large/huge variants use conv patch embedding, post-layernorm and layerscale
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config


def rename_key(name):
    """Map an original FocalNet state-dict key to the transformers naming."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        # everything but the classification head lives under the backbone prefix
        name = "focalnet." + name
    return name


def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Download an official FocalNet checkpoint, convert it and verify the outputs.

    Args:
        model_name: one of the keys of ``model_name_to_url`` below.
        pytorch_dump_folder_path: where to save the converted model/processor,
            or None to skip saving.
        push_to_hub: also push model and processor to the Hub when True.

    Raises:
        AssertionError: if preprocessing or logits diverge from the reference.
    """
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion on a reference image
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    # reference torchvision preprocessing, must match the processor exactly
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    # reference logits recorded from the original implementation
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"""Pushing model and processor of {model_name} to the hub...""")
        model.push_to_hub(f"""{model_name}""")
        processor.push_to_hub(f"""{model_name}""")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''focalnet-tiny''',
        type=str,
        help='''Name of the FocalNet model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''',
        action='''store_true''',
        help='''Whether to push the model and processor to the hub.''',
    )

    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
81
0
# NOTE(review): machine-obfuscated copy of diffusers' AutoencoderKL model
# tests. Class names were collapsed to `snake_case__`, test methods to
# `snake_case` (later defs shadow earlier ones), parameters to the single
# placeholder `SCREAMING_SNAKE_CASE`, and local reads use names from
# before obfuscation (`batch_size`, `model`, `out`, `fpaa`, `__lowercase`).
# Several signatures have duplicate argument names (SyntaxError).
# Comments only; no code tokens changed.
import gc
import unittest

import torch
from parameterized import parameterized

from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


enable_full_determinism()


class snake_case__(lowercase__ , lowercase__ , unittest.TestCase ):
    """Unit tests for AutoencoderKL (via the common model-tester mixins)."""
    # model class under test / main output name / numerical tolerance
    lowercase_ = AutoencoderKL
    lowercase_ = """sample"""
    lowercase_ = 1e-2

    @property
    def snake_case ( self : int ):
        # Random 4x3x32x32 input batch for the model.
        lowercase__ : List[str] = 4
        lowercase__ : int = 3
        lowercase__ : int = (32, 32)
        lowercase__ : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes ).to(__lowercase )
        return {"sample": image}

    @property
    def snake_case ( self : int ):
        # input shape (C, H, W)
        return (3, 32, 32)

    @property
    def snake_case ( self : Optional[int] ):
        # output shape (C, H, W)
        return (3, 32, 32)

    def snake_case ( self : int ):
        # Minimal AutoencoderKL constructor kwargs + matching dummy input.
        lowercase__ : Optional[Any] = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        lowercase__ : Optional[int] = self.dummy_input
        return init_dict, inputs_dict

    def snake_case ( self : Union[str, Any] ):
        pass

    def snake_case ( self : Dict ):
        pass

    @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
    def snake_case ( self : List[Any] ):
        # Gradients with checkpointing enabled must match gradients without it.
        # enable deterministic behavior for gradient checkpointing
        lowercase__ , lowercase__ : List[str] = self.prepare_init_args_and_inputs_for_common()
        lowercase__ : Optional[int] = self.model_class(**__lowercase )
        model.to(__lowercase )
        assert not model.is_gradient_checkpointing and model.training
        lowercase__ : List[str] = model(**__lowercase ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        lowercase__ : Dict = torch.randn_like(__lowercase )
        lowercase__ : List[Any] = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        lowercase__ : str = self.model_class(**__lowercase )
        # clone model
        model_a.load_state_dict(model.state_dict() )
        model_a.to(__lowercase )
        model_a.enable_gradient_checkpointing()
        assert model_a.is_gradient_checkpointing and model_a.training
        lowercase__ : Dict = model_a(**__lowercase ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        lowercase__ : str = (out_a - labels).mean()
        loss_a.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1E-5 )
        lowercase__ : Any = dict(model.named_parameters() )
        lowercase__ : Tuple = dict(model_a.named_parameters() )
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )

    def snake_case ( self : Union[str, Any] ):
        # from_pretrained loads the dummy checkpoint without missing keys.
        lowercase__ , lowercase__ : List[str] = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=__lowercase )
        self.assertIsNotNone(__lowercase )
        self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
        model.to(__lowercase )
        lowercase__ : List[str] = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"

    def snake_case ( self : Any ):
        # Seeded forward pass matches per-device recorded output slices.
        lowercase__ : Union[str, Any] = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
        lowercase__ : Optional[int] = model.to(__lowercase )
        model.eval()
        if torch_device == "mps":
            lowercase__ : Optional[Any] = torch.manual_seed(0 )
        else:
            lowercase__ : List[str] = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowercase__ : Union[str, Any] = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        lowercase__ : str = image.to(__lowercase )
        with torch.no_grad():
            lowercase__ : Dict = model(__lowercase , sample_posterior=__lowercase , generator=__lowercase ).sample
        lowercase__ : Union[str, Any] = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            lowercase__ : Optional[int] = torch.tensor(
                [
                    -4.0_0_7_8E-0_1,
                    -3.8_3_2_3E-0_4,
                    -1.2_6_8_1E-0_1,
                    -1.1_4_6_2E-0_1,
                    2.0_0_9_5E-0_1,
                    1.0_8_9_3E-0_1,
                    -8.8_2_4_7E-0_2,
                    -3.0_3_6_1E-0_1,
                    -9.8_6_4_4E-0_3,
                ] )
        elif torch_device == "cpu":
            lowercase__ : Optional[Any] = torch.tensor(
                [-0.1_352, 0.0_878, 0.0_419, -0.0_818, -0.1_069, 0.0_688, -0.1_458, -0.4_446, -0.0_026] )
        else:
            lowercase__ : int = torch.tensor(
                [-0.2_421, 0.4_642, 0.2_507, -0.0_438, 0.0_682, 0.3_160, -0.2_018, -0.0_727, 0.2_485] )
        self.assertTrue(torch_all_close(__lowercase , __lowercase , rtol=1E-2 ) )


@slow
class snake_case__(unittest.TestCase ):
    """Slow integration tests against the SD v1-4 VAE and recorded fixtures."""

    def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str ):
        # Filename of the recorded gaussian-noise fixture for (seed, shape).
        return f"""gaussian_noise_s={seed}_shape={"_".join([str(__lowercase ) for s in shape] )}.npy"""

    def snake_case ( self : Optional[Any] ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any]=0 , SCREAMING_SNAKE_CASE : List[str]=(4, 3, 512, 512) , SCREAMING_SNAKE_CASE : List[Any]=False ):
        # Load the recorded noise fixture as a tensor (fp16 when `fpaa`).
        # NOTE(review): `torch.floataa` in both branches is obfuscation damage —
        # presumably float16/float32 originally.
        lowercase__ : int = torch.floataa if fpaa else torch.floataa
        lowercase__ : Any = torch.from_numpy(load_hf_numpy(self.get_file_format(__lowercase , __lowercase ) ) ).to(__lowercase ).to(__lowercase )
        return image

    def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : Any="CompVis/stable-diffusion-v1-4" , SCREAMING_SNAKE_CASE : List[Any]=False ):
        # Load the SD VAE (optionally the fp16 revision) in eval mode.
        lowercase__ : List[Any] = "fp16" if fpaa else None
        lowercase__ : Any = torch.floataa if fpaa else torch.floataa
        lowercase__ : List[Any] = AutoencoderKL.from_pretrained(
            __lowercase , subfolder="vae" , torch_dtype=__lowercase , revision=__lowercase , )
        model.to(__lowercase ).eval()
        return model

    def snake_case ( self : str , SCREAMING_SNAKE_CASE : Optional[int]=0 ):
        # Seeded generator on the test device (MPS needs manual_seed).
        if torch_device == "mps":
            return torch.manual_seed(__lowercase )
        return torch.Generator(device=__lowercase ).manual_seed(__lowercase )

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
            [47, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
            # fmt: on
        ] )
    def snake_case ( self : int , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any] ):
        # Stochastic VAE forward pass matches the recorded slice per device.
        lowercase__ : str = self.get_sd_vae_model()
        lowercase__ : Dict = self.get_sd_image(__lowercase )
        lowercase__ : List[str] = self.get_generator(__lowercase )
        with torch.no_grad():
            lowercase__ : str = model(__lowercase , generator=__lowercase , sample_posterior=__lowercase ).sample
        assert sample.shape == image.shape
        lowercase__ : Optional[int] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        lowercase__ : Optional[Any] = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
        assert torch_all_close(__lowercase , __lowercase , atol=3E-3 )

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]],
            [47, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]],
            # fmt: on
        ] )
    @require_torch_gpu
    def snake_case ( self : int , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Tuple ):
        # Same as above but fp16 model/inputs (GPU only).
        lowercase__ : Dict = self.get_sd_vae_model(fpaa=__lowercase )
        lowercase__ : List[Any] = self.get_sd_image(__lowercase , fpaa=__lowercase )
        lowercase__ : Optional[int] = self.get_generator(__lowercase )
        with torch.no_grad():
            lowercase__ : Dict = model(__lowercase , generator=__lowercase , sample_posterior=__lowercase ).sample
        assert sample.shape == image.shape
        lowercase__ : List[Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        lowercase__ : int = torch.tensor(__lowercase )
        assert torch_all_close(__lowercase , __lowercase , atol=1E-2 )

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
            [47, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
            # fmt: on
        ] )
    def snake_case ( self : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[Any] ):
        # Deterministic (mode) forward pass matches the recorded slice.
        lowercase__ : Optional[int] = self.get_sd_vae_model()
        lowercase__ : Tuple = self.get_sd_image(__lowercase )
        with torch.no_grad():
            lowercase__ : Dict = model(__lowercase ).sample
        assert sample.shape == image.shape
        lowercase__ : Tuple = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        lowercase__ : List[Any] = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
        assert torch_all_close(__lowercase , __lowercase , atol=3E-3 )

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2_051, -0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]],
            [37, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]],
            # fmt: on
        ] )
    @require_torch_gpu
    def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple ):
        # Decoder-only pass on a 4-channel latent produces the recorded slice.
        lowercase__ : str = self.get_sd_vae_model()
        lowercase__ : Any = self.get_sd_image(__lowercase , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            lowercase__ : Optional[int] = model.decode(__lowercase ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        lowercase__ : Union[str, Any] = sample[-1, -2:, :2, -2:].flatten().cpu()
        lowercase__ : Optional[Any] = torch.tensor(__lowercase )
        assert torch_all_close(__lowercase , __lowercase , atol=1E-3 )

    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]],
            [16, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]],
            # fmt: on
        ] )
    @require_torch_gpu
    def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Any ):
        # fp16 decoder-only pass (GPU only).
        lowercase__ : List[str] = self.get_sd_vae_model(fpaa=__lowercase )
        lowercase__ : Dict = self.get_sd_image(__lowercase , shape=(3, 4, 64, 64) , fpaa=__lowercase )
        with torch.no_grad():
            lowercase__ : Any = model.decode(__lowercase ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        lowercase__ : Union[str, Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        lowercase__ : Optional[int] = torch.tensor(__lowercase )
        assert torch_all_close(__lowercase , __lowercase , atol=5E-3 )

    @parameterized.expand([(13,), (16,), (27,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Union[str, Any] ):
        # fp16 decode with and without xformers attention must agree.
        lowercase__ : List[Any] = self.get_sd_vae_model(fpaa=__lowercase )
        lowercase__ : str = self.get_sd_image(__lowercase , shape=(3, 4, 64, 64) , fpaa=__lowercase )
        with torch.no_grad():
            lowercase__ : Optional[Any] = model.decode(__lowercase ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            lowercase__ : Dict = model.decode(__lowercase ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(__lowercase , __lowercase , atol=1E-1 )

    @parameterized.expand([(13,), (16,), (37,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def snake_case ( self : int , SCREAMING_SNAKE_CASE : Any ):
        # fp32 decode with and without xformers attention must agree.
        lowercase__ : int = self.get_sd_vae_model()
        lowercase__ : Dict = self.get_sd_image(__lowercase , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            lowercase__ : Optional[int] = model.decode(__lowercase ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            lowercase__ : int = model.decode(__lowercase ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(__lowercase , __lowercase , atol=1E-2 )

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]],
            [47, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]],
            # fmt: on
        ] )
    def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any] ):
        # Encoder pass: sampled latents match the recorded slice.
        lowercase__ : Union[str, Any] = self.get_sd_vae_model()
        lowercase__ : Dict = self.get_sd_image(__lowercase )
        lowercase__ : Tuple = self.get_generator(__lowercase )
        with torch.no_grad():
            lowercase__ : Optional[Any] = model.encode(__lowercase ).latent_dist
            lowercase__ : Optional[Any] = dist.sample(generator=__lowercase )
        assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        lowercase__ : Optional[int] = sample[0, -1, -3:, -3:].flatten().cpu()
        lowercase__ : int = torch.tensor(__lowercase )
        # MPS needs a looser tolerance
        lowercase__ : Any = 3E-3 if torch_device != "mps" else 1E-2
        assert torch_all_close(__lowercase , __lowercase , atol=__lowercase )
715
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCAmelCase__ = logging.get_logger(__name__)

# Public checkpoint -> config URL map.
lowerCAmelCase__ = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class snake_case__(PretrainedConfig):
    """Configuration for an Informer time-series forecasting model.

    Holds the time-series feature layout (prediction/context lengths, lag
    indices, static/dynamic feature counts) together with the transformer
    architecture (layers, heads, dimensions, dropout) and the
    Informer-specific ProbSparse attention settings.

    NOTE(review): the obfuscated base class ``_UpperCamelCase`` was undefined;
    ``PretrainedConfig`` is the only imported candidate and matches the
    ``super().__init__(is_encoder_decoder=..., **kwargs)`` call below.
    """

    model_type = "informer"
    # Map generic HF attribute names onto this config's field names.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        """Build the configuration.

        Raises:
            ValueError: if ``cardinality`` or ``embedding_dimension`` is given
                but its length does not equal ``num_static_categorical_features``.
        """
        # Time-series specific configuration.
        self.prediction_length = prediction_length
        # Default the encoder context to the prediction horizon.
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # Cardinality of each static categorical feature.
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # Embedding size for each static categorical feature.
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            # Heuristic default: min(50, (cardinality + 1) // 2) per feature.
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration.
        # Each time step carries `input_size` values for every lag, plus the
        # extra scalar features counted by `_number_of_features`.
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer-specific (ProbSparse) attention settings.
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Number of extra scalar features concatenated to each model input."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
81
0
from torch import nn def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(F"""Unsupported activation function: {act_fn}""" )
716
import argparse

from torch import nn

# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
    ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
    XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)

from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging


# BUGFIX: the logger was bound to an obfuscated name while the function body
# calls `logger.info(...)`.
logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path, pytorch_dump_folder_path):
    """Port an old-structure (XLM-)ProphetNet checkpoint to the new structure.

    Loads the checkpoint with both the old and the new model classes, copies
    every weight the new class reports as missing from the old model into the
    new one, and saves the converted model.

    BUGFIXES vs. the obfuscated original: the function had two identically
    named parameters (a syntax error) although the ``__main__`` block calls
    ``convert_prophetnet_checkpoint_to_pytorch``; two shape checks in the
    fused-QKV branch were bare no-op expressions and are now real asserts.

    Args:
        prophetnet_checkpoint_path: Path of the old-structure checkpoint;
            paths containing ``"xprophetnet"`` are loaded as XLM-ProphetNet.
        pytorch_dump_folder_path: Output folder for the converted model.

    Raises:
        ValueError: If a missing key cannot be mapped onto the old model.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    # Projections stored as one fused in_proj matrix in the old model.
    special_keys = ["key_proj", "value_proj", "query_proj"]

    # new attribute name -> old attribute name ("" means same container).
    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        # Walk both models in parallel, starting at the matching roots.
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                # Fall back to the new name when the old model lacks the
                # mapped attribute (or already uses the new name).
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
                elif hasattr(old_model, attribute):
                    old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"""{attribute} is initialized.""")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"""{attribute} is initialized""")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # The old attention fuses q/k/v into one in_proj matrix;
                # slice out the third belonging to this projection.
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                # BUGFIX: these were bare tuple expressions (no-ops), not asserts.
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            # Descend one level on both sides.
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"""{old_model} does not have {old_attribute}""")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"""{key} was not correctly initialized!""")

    print(f"""Saving model to {pytorch_dump_folder_path}""")
    prophet.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
81
0
import unittest

import numpy as np
import torch
from torch import nn
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps

from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class snake_case__(PipelineTesterMixin, unittest.TestCase):
    """Fast tests for the Kandinsky 2.2 prior pipeline with tiny dummy models.

    NOTE(review): the obfuscated base ``UpperCamelCase__`` was undefined;
    ``PipelineTesterMixin`` is the imported mixin it stands for. All
    properties/methods were obfuscated to the single name ``snake_case``
    (shadowing each other) while the bodies reference the restored names
    (``self.dummy_prior``, ``self.pipeline_class``, ...).
    """

    pipeline_class = KandinskyVaaPriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents
        # will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_image_processor(self):
        # NOTE(review): the three boolean flags were obfuscated; True matches
        # the upstream diffusers test this file mirrors — confirm.
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1_000,
            clip_sample=True,
            clip_sample_range=10.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # mps does not support device-bound generators.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
717
import json
import os
import unittest

from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class snake_case__(TokenizerTesterMixin, unittest.TestCase):
    """GPT-2 tokenizer tests against a tiny fixture vocabulary.

    NOTE(review): the obfuscated base ``_UpperCamelCase`` was undefined;
    ``TokenizerTesterMixin`` is the imported mixin it stands for. All test
    methods were obfuscated to the single name ``snake_case`` (so unittest
    would only discover one of them) and have been given distinct names;
    class attributes likewise shadowed each other under ``lowercase_`` while
    the bodies read ``self.rust_tokenizer_class`` / ``self.test_rust_tokenizer``.
    """

    tokenizer_class = GPTaTokenizer
    rust_tokenizer_class = GPTaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        # Write the fixture vocab/merges files the tokenizers load from.
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTaTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = GPTaTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # GPT-2 has no pad token, so padding to max_length must raise.
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPTaTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPTaTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    # tokenizer has no padding token; nothing to test here.
    def test_padding_different_model_input_name(self):
        pass

    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0,
                    sequence_1,
                    add_special_tokens=True,
                    return_special_tokens_mask=True,
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                # Drop every position flagged as a special token and compare
                # with the specials-free encoding.
                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)


@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    """Regression tests for the OPT tokenizer (GPT-2 based).

    NOTE(review): this class was also obfuscated to ``snake_case__``, which
    shadowed the GPT-2 test class above at module scope; it has been given a
    distinct name so both classes are discovered.
    """

    def test_serialize_deserialize_fast_opt(self):
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"
        input_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(input_ids, [2, 250, 1_345, 9, 10, 4_758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        input_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(input_ids, [2, 250, 1_345, 9, 10, 4_758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"
        input_ids = tokenizer.encode(
            text,
        )
        # Same as above
        self.assertEqual(input_ids, [2, 250, 1_345, 9, 10, 4_758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]
        text = "A photo of a cat"
        input_ids = tokenizer.encode(
            text,
        )
        # We changed the bos token
        self.assertEqual(input_ids, [31_957, 250, 1_345, 9, 10, 4_758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        input_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(input_ids, [31_957, 250, 1_345, 9, 10, 4_758])
81
0
import os
import socket
from contextlib import contextmanager

import torch

from ..commands.config.default import write_basic_config  # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version


if is_deepspeed_available():
    from deepspeed import DeepSpeedEngine

if is_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


# NOTE(review): in the obfuscated original every function was named
# `__lowerCamelCase` (so each definition shadowed the previous one) and the
# bodies referenced the undefined name `__lowerCAmelCase`. The names below are
# restored from the bodies' own references (e.g. the recursive `merge_dicts`
# call was literal in the source) and accelerate's public utility API.


def is_compiled_module(module):
    """Return True if *module* is a ``torch.compile`` OptimizedModule wrapper."""
    # torch._dynamo only exists on torch >= 2.0.
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper=True):
    """Unwrap *model* from DDP/DataParallel/DeepSpeed/``torch.compile`` layers.

    Args:
        model: The possibly wrapped module.
        keep_fp32_wrapper: When False, also strip accelerate's mixed-precision
            forward wrapper and undo any transformer-engine conversion.

    Returns:
        The innermost model (re-wrapped in its compiled shell if it was
        compiled, so the compile artifact is preserved).
    """
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    # Wrappers can be nested (e.g. DDP around DeepSpeed); peel them all.
    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            # Walk down the decorator chain until the pristine forward.
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        # Re-attach the unwrapped model to the compiled shell.
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """Block until every distributed process reaches this point."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save *obj* to *f* on the main process only (TPU-aware)."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Temporarily export upper-cased environment variables.

    Each keyword becomes ``os.environ[KEY.upper()] = str(value)`` for the
    duration of the ``with`` block and is removed afterwards.
    """
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Best-effort human-readable name: qualname, name, or ``str(obj)``."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        # Instances without a name: fall back to their class.
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge *source* into *destination* and return *destination*."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port=None):
    """Return True if something is already listening on localhost *port*.

    Defaults to 29500, torch.distributed's default master port.
    """
    if port is None:
        port = 29_500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
718
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ = { '''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimesformerModel''', '''TimesformerForVideoClassification''', '''TimesformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timesformer import ( TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimesformerForVideoClassification, TimesformerModel, TimesformerPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
81
0