code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
from typing import Any class _snake_case : def __init__( self ,UpperCamelCase ) -> List[str]: snake_case__ :Dict = data snake_case__ :Any = None def __repr__( self ) -> str: return f'Node({self.data})' class _snake_case : def __init__( self ) -> Optional[Any]: snake_case__ :str = None def __iter__( self ) -> Any: snake_case__ :Optional[Any] = self.head while node: yield node.data snake_case__ :Dict = node.next def __len__( self ) -> int: return sum(1 for _ in self ) def __repr__( self ) -> str: return "->".join([str(UpperCamelCase ) for item in self] ) def __getitem__( self ,UpperCamelCase ) -> Any: if not 0 <= index < len(self ): raise ValueError("list index out of range." ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self ,UpperCamelCase ,UpperCamelCase ) -> None: if not 0 <= index < len(self ): raise ValueError("list index out of range." ) snake_case__ :Any = self.head for _ in range(UpperCamelCase ): snake_case__ :Any = current.next snake_case__ :Optional[int] = data def lowerCAmelCase_ ( self ,UpperCamelCase ) -> None: self.insert_nth(len(self ) ,UpperCamelCase ) def lowerCAmelCase_ ( self ,UpperCamelCase ) -> None: self.insert_nth(0 ,UpperCamelCase ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> None: if not 0 <= index <= len(self ): raise IndexError("list index out of range" ) snake_case__ :Tuple = Node(UpperCamelCase ) if self.head is None: snake_case__ :Dict = new_node elif index == 0: snake_case__ :Union[str, Any] = self.head # link new_node to head snake_case__ :str = new_node else: snake_case__ :int = self.head for _ in range(index - 1 ): snake_case__ :Tuple = temp.next snake_case__ :List[Any] = temp.next snake_case__ :str = new_node def lowerCAmelCase_ ( self ) -> None: # print every node data print(self ) def lowerCAmelCase_ ( self ) -> Any: return self.delete_nth(0 ) def lowerCAmelCase_ ( self ) -> Any: # delete from tail return self.delete_nth(len(self ) - 1 ) def lowerCAmelCase_ ( self ,UpperCamelCase = 0 ) -> Any: if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError("List index out of range." ) snake_case__ :Optional[Any] = self.head # default first node if index == 0: snake_case__ :Dict = self.head.next else: snake_case__ :Union[str, Any] = self.head for _ in range(index - 1 ): snake_case__ :Dict = temp.next snake_case__ :Tuple = temp.next snake_case__ :List[Any] = temp.next.next return delete_node.data def lowerCAmelCase_ ( self ) -> bool: return self.head is None def lowerCAmelCase_ ( self ) -> None: snake_case__ :Dict = None snake_case__ :Union[str, Any] = self.head while current: # Store the current node's next node. snake_case__ :Any = current.next # Make the current node's next point backwards snake_case__ :Any = prev # Make the previous node be the current node snake_case__ :int = current # Make the current node the next node (to progress iteration) snake_case__ :Optional[int] = next_node # Return prev in order to put the head at the end snake_case__ :Tuple = prev def lowercase_ ( ) -> None: '''simple docstring''' snake_case__ :List[Any] = LinkedList() assert linked_list.is_empty() is True assert str(__snake_case ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. for i in range(10 ): assert len(__snake_case ) == i linked_list.insert_nth(__snake_case , i + 1 ) assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(0 , 12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(__snake_case ) == 9 assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True for i in range(0 , 9 ): snake_case__ :int = -i assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True linked_list.reverse() assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(-8 , 1 ) ) def lowercase_ ( ) -> None: '''simple docstring''' snake_case__ :str = [ -9, 1_00, Node(77_34_51_12 ), "dlrow olleH", 7, 55_55, 0, -1_9_2.5_5_5_5_5, "Hello, world!", 7_7.9, Node(10 ), None, None, 1_2.2_0, ] snake_case__ :Any = LinkedList() for i in test_input: linked_list.insert_tail(__snake_case ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(__snake_case ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head snake_case__ :Optional[int] = linked_list.delete_head() assert result == -9 assert ( str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail snake_case__ :Tuple = linked_list.delete_tail() assert result == 1_2.2 assert ( str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list snake_case__ :List[str] = linked_list.delete_nth(10 ) assert result is None assert ( str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node("Hello again, world!" ) ) assert ( str(__snake_case ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(__snake_case ) assert ( str(__snake_case ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(__snake_case ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def lowercase_ ( ) -> List[str]: '''simple docstring''' from doctest import testmod testmod() snake_case__ :str = LinkedList() linked_list.insert_head(input("Inserting 1st at head " ).strip() ) linked_list.insert_head(input("Inserting 2nd at head " ).strip() ) print("\nPrint list:" ) linked_list.print_list() linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() ) linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() ) print("\nPrint list:" ) linked_list.print_list() print("\nDelete head" ) linked_list.delete_head() print("Delete tail" ) linked_list.delete_tail() print("\nPrint list:" ) linked_list.print_list() print("\nReverse linked list" ) linked_list.reverse() print("\nPrint list:" ) linked_list.print_list() print("\nString representation of linked list:" ) print(__snake_case ) print("\nReading/changing Node data using indexing:" ) print(F'Element at Position 1: {linked_list[1]}' ) snake_case__ :Union[str, Any] = input("Enter New Value: " ).strip() print("New list:" ) print(__snake_case ) print(F'length of linked_list is : {len(__snake_case )}' ) if __name__ == "__main__": main()
57
import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class _snake_case ( unittest.TestCase ): def lowerCAmelCase_ ( self ) -> List[Any]: # A mock response for an HTTP head request to emulate server down snake_case__ :Tuple = mock.Mock() snake_case__ :List[str] = 500 snake_case__ :Any = {} snake_case__ :Union[str, Any] = HTTPError snake_case__ :Tuple = {} # Download this model to make sure it's in the cache. snake_case__ :Any = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.Session.request" ,return_value=UpperCamelCase ) as mock_head: snake_case__ :Dict = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def lowerCAmelCase_ ( self ) -> Dict: # A mock response for an HTTP head request to emulate server down snake_case__ :Union[str, Any] = mock.Mock() snake_case__ :int = 500 snake_case__ :Any = {} snake_case__ :Dict = HTTPError snake_case__ :List[Any] = {} # Download this model to make sure it's in the cache. snake_case__ :Optional[int] = GPTaTokenizerFast.from_pretrained("gpt2" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.Session.request" ,return_value=UpperCamelCase ) as mock_head: snake_case__ :Any = GPTaTokenizerFast.from_pretrained("gpt2" ) # This check we did call the fake head request mock_head.assert_called() def lowerCAmelCase_ ( self ) -> int: # This test is for deprecated behavior and can be removed in v5 try: snake_case__ :Union[str, Any] = tempfile.mktemp() with open(UpperCamelCase ,"wb" ) as f: http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" ,UpperCamelCase ) snake_case__ :Tuple = AlbertTokenizer.from_pretrained(UpperCamelCase ) finally: os.remove(UpperCamelCase ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile("tokenizer.json" ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open("tokenizer.json" ,"wb" ) as f: http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" ,UpperCamelCase ) snake_case__ :Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size ,1_000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. finally: os.remove("tokenizer.json" ) def lowerCAmelCase_ ( self ) -> Union[str, Any]: # This test is for deprecated behavior and can be removed in v5 snake_case__ :Union[str, Any] = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" ) @is_staging_test class _snake_case ( unittest.TestCase ): _A = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou'] @classmethod def lowerCAmelCase_ ( cls ) -> Optional[int]: snake_case__ :List[str] = TOKEN HfFolder.save_token(UpperCamelCase ) @classmethod def lowerCAmelCase_ ( cls ) -> Union[str, Any]: try: delete_repo(token=cls._token ,repo_id="test-tokenizer" ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id="valid_org/test-tokenizer-org" ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id="test-dynamic-tokenizer" ) except HTTPError: pass def lowerCAmelCase_ ( self ) -> Optional[Any]: with tempfile.TemporaryDirectory() as tmp_dir: snake_case__ :List[str] = os.path.join(UpperCamelCase ,"vocab.txt" ) with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) snake_case__ :str = BertTokenizer(UpperCamelCase ) tokenizer.push_to_hub("test-tokenizer" ,use_auth_token=self._token ) snake_case__ :Dict = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) # Reset repo delete_repo(token=self._token ,repo_id="test-tokenizer" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase ,repo_id="test-tokenizer" ,push_to_hub=UpperCamelCase ,use_auth_token=self._token ) snake_case__ :List[str] = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) def lowerCAmelCase_ ( self ) -> Optional[int]: with tempfile.TemporaryDirectory() as tmp_dir: snake_case__ :List[Any] = os.path.join(UpperCamelCase ,"vocab.txt" ) with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) snake_case__ :Any = BertTokenizer(UpperCamelCase ) tokenizer.push_to_hub("valid_org/test-tokenizer-org" ,use_auth_token=self._token ) snake_case__ :Any = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) # Reset repo delete_repo(token=self._token ,repo_id="valid_org/test-tokenizer-org" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( UpperCamelCase ,repo_id="valid_org/test-tokenizer-org" ,push_to_hub=UpperCamelCase ,use_auth_token=self._token ) snake_case__ :Union[str, Any] = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) @require_tokenizers def lowerCAmelCase_ ( self ) -> Any: CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: snake_case__ :str = os.path.join(UpperCamelCase ,"vocab.txt" ) with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) snake_case__ :Optional[int] = CustomTokenizer(UpperCamelCase ) # No fast custom tokenizer tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token ) snake_case__ :Union[str, Any] = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=UpperCamelCase ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: snake_case__ :int = os.path.join(UpperCamelCase ,"vocab.txt" ) with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) snake_case__ :Tuple = BertTokenizerFast.from_pretrained(UpperCamelCase ) bert_tokenizer.save_pretrained(UpperCamelCase ) snake_case__ :List[Any] = CustomTokenizerFast.from_pretrained(UpperCamelCase ) tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token ) snake_case__ :List[Any] = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=UpperCamelCase ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizerFast" ) snake_case__ :List[str] = AutoTokenizer.from_pretrained( f'{USER}/test-dynamic-tokenizer' ,use_fast=UpperCamelCase ,trust_remote_code=UpperCamelCase ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" ) class _snake_case ( unittest.TestCase ): def lowerCAmelCase_ ( self ) -> List[Any]: snake_case__ :int = Trie() trie.add("Hello 友達" ) self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} ) trie.add("Hello" ) trie.data self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} ) def lowerCAmelCase_ ( self ) -> int: snake_case__ :List[str] = Trie() self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS] This is a extra_id_100"] ) trie.add("[CLS]" ) trie.add("extra_id_1" ) trie.add("extra_id_100" ) self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS]", " This is a ", "extra_id_100"] ) def lowerCAmelCase_ ( self ) -> str: snake_case__ :Optional[Any] = Trie() trie.add("A" ) self.assertEqual(trie.split("ABC" ) ,["A", "BC"] ) self.assertEqual(trie.split("BCA" ) ,["BC", "A"] ) def lowerCAmelCase_ ( self ) -> Dict: snake_case__ :Any = Trie() trie.add("TOKEN]" ) trie.add("[SPECIAL_TOKEN]" ) self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] ) def lowerCAmelCase_ ( self ) -> Tuple: snake_case__ :List[Any] = Trie() trie.add("A" ) trie.add("P" ) trie.add("[SPECIAL_TOKEN]" ) self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] ) def lowerCAmelCase_ ( self ) -> Tuple: snake_case__ :str = Trie() trie.add("AB" ) trie.add("B" ) trie.add("C" ) self.assertEqual(trie.split("ABC" ) ,["AB", "C"] ) def lowerCAmelCase_ ( self ) -> Union[str, Any]: snake_case__ :Dict = Trie() trie.add("ABC" ) trie.add("B" ) trie.add("CD" ) self.assertEqual(trie.split("ABCD" ) ,["ABC", "D"] ) def lowerCAmelCase_ ( self ) -> int: # Even if the offsets are wrong, we necessarily output correct string # parts. snake_case__ :Optional[int] = Trie() snake_case__ :Union[str, Any] = trie.cut_text("ABC" ,[0, 0, 2, 1, 2, 3] ) self.assertEqual(UpperCamelCase ,["AB", "C"] )
57
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __UpperCAmelCase : Tuple = { "configuration_conditional_detr": [ "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConditionalDetrConfig", "ConditionalDetrOnnxConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : Any = ["ConditionalDetrFeatureExtractor"] __UpperCAmelCase : Union[str, Any] = ["ConditionalDetrImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : str = [ "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST", "ConditionalDetrForObjectDetection", "ConditionalDetrForSegmentation", "ConditionalDetrModel", "ConditionalDetrPreTrainedModel", ] if TYPE_CHECKING: from .configuration_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ConditionalDetrOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor from .image_processing_conditional_detr import ConditionalDetrImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel, ) else: import sys __UpperCAmelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
57
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __UpperCAmelCase : Optional[Any] = 1_6 __UpperCAmelCase : Optional[int] = 3_2 def lowercase_ ( __snake_case : Accelerator , __snake_case : int = 16 , __snake_case : str = "bert-base-cased" ) -> Optional[Any]: '''simple docstring''' snake_case__ :int = AutoTokenizer.from_pretrained(__snake_case ) snake_case__ :Optional[int] = load_dataset("glue" , "mrpc" ) def tokenize_function(__snake_case : Tuple ): # max_length=None => use the model max length (it's actually the default) snake_case__ :Any = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__snake_case , max_length=__snake_case ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset snake_case__ :List[Any] = datasets.map( __snake_case , batched=__snake_case , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=__snake_case ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library snake_case__ :Any = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(__snake_case : Dict ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__snake_case , padding="max_length" , max_length=1_28 , return_tensors="pt" ) return tokenizer.pad(__snake_case , padding="longest" , return_tensors="pt" ) # Instantiate dataloaders. snake_case__ :Any = DataLoader( tokenized_datasets["train"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case ) snake_case__ :Tuple = DataLoader( tokenized_datasets["validation"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case ) return train_dataloader, eval_dataloader def lowercase_ ( __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : Optional[int] ) -> Tuple: '''simple docstring''' model.eval() snake_case__ :Union[str, Any] = 0 for step, batch in enumerate(__snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): snake_case__ :List[Any] = model(**__snake_case ) snake_case__ :Any = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times snake_case__ , snake_case__ :Tuple = accelerator.gather( (predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(__snake_case ) - 1: snake_case__ :List[str] = predictions[: len(eval_dataloader.dataset ) - samples_seen] snake_case__ :Optional[int] = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=__snake_case , references=__snake_case , ) snake_case__ :int = metric.compute() return eval_metric["accuracy"] def lowercase_ ( __snake_case : Union[str, Any] , __snake_case : Optional[Any] ) -> Any: '''simple docstring''' snake_case__ :Any = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs snake_case__ :Union[str, Any] = config["lr"] snake_case__ :List[str] = int(config["num_epochs"] ) snake_case__ :Optional[Any] = int(config["seed"] ) snake_case__ :List[Any] = int(config["batch_size"] ) snake_case__ :List[Any] = args.model_name_or_path set_seed(__snake_case ) snake_case__ , snake_case__ :List[Any] = get_dataloaders(__snake_case , __snake_case , __snake_case ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) snake_case__ :List[Any] = AutoModelForSequenceClassification.from_pretrained(__snake_case , return_dict=__snake_case ) # Instantiate optimizer snake_case__ :int = ( AdamW if accelerator.state.deepspeed_plugin is None or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) snake_case__ :Tuple = optimizer_cls(params=model.parameters() , lr=__snake_case ) if accelerator.state.deepspeed_plugin is not None: snake_case__ :List[str] = accelerator.state.deepspeed_plugin.deepspeed_config[ "gradient_accumulation_steps" ] else: snake_case__ :Any = 1 snake_case__ :List[Any] = (len(__snake_case ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): snake_case__ :Optional[Any] = get_linear_schedule_with_warmup( optimizer=__snake_case , num_warmup_steps=0 , num_training_steps=__snake_case , ) else: snake_case__ :Any = DummyScheduler(__snake_case , total_num_steps=__snake_case , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ :int = accelerator.prepare( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) # We need to keep track of how many total steps we have iterated over snake_case__ :Dict = 0 # We also need to keep track of the stating epoch so files are named properly snake_case__ :Union[str, Any] = 0 snake_case__ :List[str] = evaluate.load("glue" , "mrpc" ) snake_case__ :Optional[Any] = num_epochs if args.partial_train_epoch is not None: snake_case__ :List[Any] = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) snake_case__ :Union[str, Any] = args.resume_from_checkpoint.split("epoch_" )[1] snake_case__ :Dict = "" for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break snake_case__ :str = int(__snake_case ) + 1 snake_case__ :List[Any] = evaluation_loop(__snake_case , __snake_case , __snake_case , __snake_case ) accelerator.print("resumed checkpoint performance:" , __snake_case ) accelerator.print("resumed checkpoint's scheduler's lr:" , lr_scheduler.get_lr()[0] ) accelerator.print("resumed optimizers's lr:" , optimizer.param_groups[0]["lr"] ) with open(os.path.join(args.output_dir , F'state_{starting_epoch-1}.json' ) , "r" ) as f: snake_case__ :Tuple = json.load(__snake_case ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model snake_case__ :Optional[int] = {} for epoch in range(__snake_case , __snake_case ): model.train() for step, batch in enumerate(__snake_case ): snake_case__ :str = model(**__snake_case ) snake_case__ :List[str] = outputs.loss snake_case__ :List[Any] = loss / gradient_accumulation_steps accelerator.backward(__snake_case ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 snake_case__ :int = F'epoch_{epoch}' snake_case__ :str = os.path.join(args.output_dir , __snake_case ) accelerator.save_state(__snake_case ) snake_case__ :Union[str, Any] = evaluation_loop(__snake_case , __snake_case , __snake_case , __snake_case ) snake_case__ :List[str] = accuracy snake_case__ :List[str] = lr_scheduler.get_lr()[0] snake_case__ :List[Any] = optimizer.param_groups[0]["lr"] snake_case__ :Dict = epoch snake_case__ :List[Any] = overall_step accelerator.print(F'epoch {epoch}:' , __snake_case ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , F'state_{epoch}.json' ) , "w" ) as f: json.dump(__snake_case , __snake_case ) def lowercase_ ( ) -> Any: '''simple docstring''' snake_case__ :List[Any] = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." ) parser.add_argument( "--model_name_or_path" , type=__snake_case , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__snake_case , ) parser.add_argument( "--output_dir" , type=__snake_case , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , ) parser.add_argument( "--resume_from_checkpoint" , type=__snake_case , default=__snake_case , help="If the training should continue from a checkpoint folder." , ) parser.add_argument( "--partial_train_epoch" , type=__snake_case , default=__snake_case , help="If passed, the training will stop after this number of epochs." , ) parser.add_argument( "--num_epochs" , type=__snake_case , default=2 , help="Number of train epochs." , ) snake_case__ :Any = parser.parse_args() snake_case__ :int = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} training_function(__snake_case , __snake_case ) if __name__ == "__main__": main()
57
1
import logging import random import ray from transformers import RagConfig, RagRetriever, RagTokenizer from transformers.models.rag.retrieval_rag import CustomHFIndex __UpperCAmelCase : Dict = logging.getLogger(__name__) class _snake_case : def __init__( self ) -> Optional[int]: snake_case__ :List[Any] = False def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> List[str]: if not self.initialized: snake_case__ :Dict = RagRetriever( UpperCamelCase ,question_encoder_tokenizer=UpperCamelCase ,generator_tokenizer=UpperCamelCase ,index=UpperCamelCase ,init_retrieval=UpperCamelCase ,) snake_case__ :List[Any] = True def lowerCAmelCase_ ( self ) -> int: self.retriever.index.init_index() def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> List[str]: snake_case__ , snake_case__ :List[Any] = self.retriever._main_retrieve(UpperCamelCase ,UpperCamelCase ) return doc_ids, retrieved_doc_embeds class _snake_case ( _A ): def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ) -> Dict: if index is not None and index.is_initialized() and len(UpperCamelCase ) > 0: raise ValueError( "When using Ray for distributed fine-tuning, " "you'll need to provide the paths instead, " "as the dataset and the index are loaded " "separately. More info in examples/rag/use_own_knowledge_dataset.py " ) super().__init__( UpperCamelCase ,question_encoder_tokenizer=UpperCamelCase ,generator_tokenizer=UpperCamelCase ,index=UpperCamelCase ,init_retrieval=UpperCamelCase ,) snake_case__ :Union[str, Any] = retrieval_workers if len(self.retrieval_workers ) > 0: ray.get( [ worker.create_rag_retriever.remote(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) for worker in self.retrieval_workers ] ) def lowerCAmelCase_ ( self ) -> List[Any]: logger.info("initializing retrieval" ) if len(self.retrieval_workers ) > 0: ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] ) else: # Non-distributed training. Load index into this same process. self.index.init_index() def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> int: if len(self.retrieval_workers ) > 0: # Select a random retrieval actor. snake_case__ :Optional[int] = self.retrieval_workers[random.randint(0 ,len(self.retrieval_workers ) - 1 )] snake_case__ , snake_case__ :str = ray.get(random_worker.retrieve.remote(UpperCamelCase ,UpperCamelCase ) ) else: snake_case__ , snake_case__ :List[Any] = self._main_retrieve(UpperCamelCase ,UpperCamelCase ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(UpperCamelCase ) @classmethod def lowerCAmelCase_ ( cls ,UpperCamelCase ,UpperCamelCase=None ,**UpperCamelCase ) -> str: return super(UpperCamelCase ,cls ).get_tokenizers(UpperCamelCase ,UpperCamelCase ,**UpperCamelCase ) @classmethod def lowerCAmelCase_ ( cls ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ,**UpperCamelCase ) -> int: snake_case__ :str = kwargs.pop("config" ,UpperCamelCase ) or RagConfig.from_pretrained(UpperCamelCase ,**UpperCamelCase ) snake_case__ :List[str] = RagTokenizer.from_pretrained(UpperCamelCase ,config=UpperCamelCase ) snake_case__ :Any = rag_tokenizer.question_encoder snake_case__ :Any = rag_tokenizer.generator if indexed_dataset is not None: snake_case__ :List[str] = "custom" snake_case__ :Tuple = CustomHFIndex(config.retrieval_vector_size ,UpperCamelCase ) else: snake_case__ :Union[str, Any] = cls._build_index(UpperCamelCase ) return cls( UpperCamelCase ,question_encoder_tokenizer=UpperCamelCase ,generator_tokenizer=UpperCamelCase ,retrieval_workers=UpperCamelCase ,index=UpperCamelCase ,)
57
from __future__ import annotations class _snake_case : def __init__( self ,UpperCamelCase ) -> None: snake_case__ :Union[str, Any] = data snake_case__ :Node | None = None snake_case__ :Node | None = None def lowercase_ ( __snake_case : Node | None ) -> None: # In Order traversal of the tree '''simple docstring''' if tree: display(tree.left ) print(tree.data ) display(tree.right ) def lowercase_ ( __snake_case : Node | None ) -> int: '''simple docstring''' return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0 def lowercase_ ( __snake_case : Node ) -> bool: '''simple docstring''' if not tree: return True if tree.left and tree.right: return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right ) else: return not tree.left and not tree.right def lowercase_ ( ) -> None: # Main function for testing. '''simple docstring''' snake_case__ :Dict = Node(1 ) snake_case__ :int = Node(2 ) snake_case__ :Optional[Any] = Node(3 ) snake_case__ :Tuple = Node(4 ) snake_case__ :str = Node(5 ) snake_case__ :Optional[Any] = Node(6 ) snake_case__ :List[Any] = Node(7 ) snake_case__ :List[str] = Node(8 ) snake_case__ :Tuple = Node(9 ) print(is_full_binary_tree(__snake_case ) ) print(depth_of_tree(__snake_case ) ) print("Tree is: " ) display(__snake_case ) if __name__ == "__main__": main()
57
1
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __UpperCAmelCase : Dict = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) __UpperCAmelCase : Optional[Any] = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''', F'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''', F'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ("input_proj.weight", "input_projection.weight"), ("input_proj.bias", "input_projection.bias"), ("query_embed.weight", "query_position_embeddings.weight"), ("transformer.decoder.norm.weight", "decoder.layernorm.weight"), ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), ("class_embed.weight", "class_labels_classifier.weight"), ("class_embed.bias", "class_labels_classifier.bias"), ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"), ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"), ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"), ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"), ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"), ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"), ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"), ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"), ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"), ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"), ] ) def lowercase_ ( __snake_case : Tuple , __snake_case : List[Any] , __snake_case : Union[str, Any] ) -> Optional[int]: '''simple docstring''' snake_case__ :int = state_dict.pop(__snake_case ) snake_case__ :List[str] = val def lowercase_ ( __snake_case : Optional[int] ) -> List[Any]: '''simple docstring''' snake_case__ :Optional[Any] = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: snake_case__ :Tuple = key.replace("backbone.0.body" , "backbone.conv_encoder.model" ) snake_case__ :Any = value else: snake_case__ :Any = value return new_state_dict def lowercase_ ( __snake_case : Optional[Any] , __snake_case : Optional[int]=False ) -> str: '''simple docstring''' snake_case__ :Union[str, Any] = "" if is_panoptic: snake_case__ :Dict = "conditional_detr." # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) snake_case__ :List[Any] = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' ) snake_case__ :Tuple = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict snake_case__ :Dict = in_proj_weight[:2_56, :] snake_case__ :int = in_proj_bias[:2_56] snake_case__ :str = in_proj_weight[2_56:5_12, :] snake_case__ :Union[str, Any] = in_proj_bias[2_56:5_12] snake_case__ :Union[str, Any] = in_proj_weight[-2_56:, :] snake_case__ :Optional[int] = in_proj_bias[-2_56:] def lowercase_ ( ) -> Optional[int]: '''simple docstring''' snake_case__ :str = "http://images.cocodataset.org/val2017/000000039769.jpg" snake_case__ :Optional[int] = Image.open(requests.get(__snake_case , stream=__snake_case ).raw ) return im @torch.no_grad() def lowercase_ ( __snake_case : Any , __snake_case : Optional[int] ) -> Dict: '''simple docstring''' snake_case__ :Any = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: snake_case__ :Dict = "resnet101" if "dc5" in model_name: snake_case__ :Any = True snake_case__ :str = "panoptic" in model_name if is_panoptic: snake_case__ :Union[str, Any] = 2_50 else: snake_case__ :Optional[int] = 91 snake_case__ :List[str] = "huggingface/label-files" snake_case__ :int = "coco-detection-id2label.json" snake_case__ :Union[str, Any] = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type="dataset" ) , "r" ) ) snake_case__ :Optional[int] = {int(__snake_case ): v for k, v in idalabel.items()} snake_case__ :Optional[Any] = idalabel snake_case__ :List[Any] = {v: k for k, v in idalabel.items()} # load image processor snake_case__ :Tuple = "coco_panoptic" if is_panoptic else "coco_detection" snake_case__ :Dict = ConditionalDetrImageProcessor(format=__snake_case ) # prepare image snake_case__ :List[str] = prepare_img() snake_case__ :List[Any] = image_processor(images=__snake_case , return_tensors="pt" ) snake_case__ :Any = encoding["pixel_values"] logger.info(F'Converting model {model_name}...' ) # load original model from torch hub snake_case__ :Any = torch.hub.load("DeppMeng/ConditionalDETR" , __snake_case , pretrained=__snake_case ).eval() snake_case__ :str = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: snake_case__ :Optional[Any] = "conditional_detr." + src rename_key(__snake_case , __snake_case , __snake_case ) snake_case__ :Optional[Any] = rename_backbone_keys(__snake_case ) # query, key and value matrices need special treatment read_in_q_k_v(__snake_case , is_panoptic=__snake_case ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them snake_case__ :Dict = "conditional_detr.model." if is_panoptic else "model." for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("conditional_detr" ) and not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ) ): snake_case__ :int = state_dict.pop(__snake_case ) snake_case__ :int = val elif "class_labels_classifier" in key or "bbox_predictor" in key: snake_case__ :Any = state_dict.pop(__snake_case ) snake_case__ :List[Any] = val elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ): continue else: snake_case__ :Any = state_dict.pop(__snake_case ) snake_case__ :List[Any] = val else: if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): snake_case__ :Union[str, Any] = state_dict.pop(__snake_case ) snake_case__ :List[str] = val # finally, create HuggingFace model and load state dict snake_case__ :Tuple = ConditionalDetrForSegmentation(__snake_case ) if is_panoptic else ConditionalDetrForObjectDetection(__snake_case ) model.load_state_dict(__snake_case ) model.eval() model.push_to_hub(repo_id=__snake_case , organization="DepuMeng" , commit_message="Add model" ) # verify our conversion snake_case__ :List[str] = conditional_detr(__snake_case ) snake_case__ :Optional[Any] = model(__snake_case ) assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1e-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1e-4 ) # Save model and image processor logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' ) Path(__snake_case ).mkdir(exist_ok=__snake_case ) model.save_pretrained(__snake_case ) image_processor.save_pretrained(__snake_case ) if __name__ == "__main__": __UpperCAmelCase : List[Any] = argparse.ArgumentParser() parser.add_argument( "--model_name", default="conditional_detr_resnet50", type=str, help="Name of the CONDITIONAL_DETR model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) __UpperCAmelCase : Tuple = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
57
import os try: from .build_directory_md import good_file_paths except ImportError: from build_directory_md import good_file_paths # type: ignore __UpperCAmelCase : List[Any] = list(good_file_paths()) assert filepaths, "good_file_paths() failed!" __UpperCAmelCase : int = [file for file in filepaths if file != file.lower()] if upper_files: print(F'''{len(upper_files)} files contain uppercase characters:''') print("\n".join(upper_files) + "\n") __UpperCAmelCase : Any = [file for file in filepaths if " " in file] if space_files: print(F'''{len(space_files)} files contain space characters:''') print("\n".join(space_files) + "\n") __UpperCAmelCase : str = [file for file in filepaths if "-" in file] if hyphen_files: print(F'''{len(hyphen_files)} files contain hyphen characters:''') print("\n".join(hyphen_files) + "\n") __UpperCAmelCase : Dict = [file for file in filepaths if os.sep not in file] if nodir_files: print(F'''{len(nodir_files)} files are not in a directory:''') print("\n".join(nodir_files) + "\n") __UpperCAmelCase : int = len(upper_files + space_files + hyphen_files + nodir_files) if bad_files: import sys sys.exit(bad_files)
57
1
import unittest import numpy as np from transformers import AlbertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.albert.modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, ) class _snake_case ( unittest.TestCase ): def __init__( self ,UpperCamelCase ,UpperCamelCase=13 ,UpperCamelCase=7 ,UpperCamelCase=True ,UpperCamelCase=True ,UpperCamelCase=True ,UpperCamelCase=True ,UpperCamelCase=99 ,UpperCamelCase=32 ,UpperCamelCase=5 ,UpperCamelCase=4 ,UpperCamelCase=37 ,UpperCamelCase="gelu" ,UpperCamelCase=0.1 ,UpperCamelCase=0.1 ,UpperCamelCase=512 ,UpperCamelCase=16 ,UpperCamelCase=2 ,UpperCamelCase=0.02 ,UpperCamelCase=4 ,) -> Optional[int]: snake_case__ :Optional[Any] = parent snake_case__ :Tuple = batch_size snake_case__ :Tuple = seq_length snake_case__ :Optional[int] = is_training snake_case__ :str = use_attention_mask snake_case__ :Dict = use_token_type_ids snake_case__ :int = use_labels snake_case__ :Optional[Any] = vocab_size snake_case__ :Dict = hidden_size snake_case__ :List[str] = num_hidden_layers snake_case__ :Tuple = num_attention_heads snake_case__ :Optional[int] = intermediate_size snake_case__ :Optional[int] = hidden_act snake_case__ :Optional[int] = hidden_dropout_prob snake_case__ :List[str] = attention_probs_dropout_prob snake_case__ :Any = max_position_embeddings snake_case__ :List[Any] = type_vocab_size snake_case__ :List[Any] = type_sequence_label_size snake_case__ :Optional[int] = initializer_range snake_case__ :List[Any] = num_choices def lowerCAmelCase_ ( self ) -> Tuple: snake_case__ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) snake_case__ :List[Any] = None if self.use_attention_mask: snake_case__ :Tuple = random_attention_mask([self.batch_size, self.seq_length] ) snake_case__ :List[str] = None if self.use_token_type_ids: snake_case__ :Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) snake_case__ :Tuple = AlbertConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=UpperCamelCase ,initializer_range=self.initializer_range ,) return config, input_ids, token_type_ids, attention_mask def lowerCAmelCase_ ( self ) -> Optional[int]: snake_case__ :Union[str, Any] = self.prepare_config_and_inputs() snake_case__ , snake_case__ , snake_case__ , snake_case__ :Any = config_and_inputs snake_case__ :Optional[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict @require_flax class _snake_case ( _A , unittest.TestCase ): _A = ( ( FlaxAlbertModel, FlaxAlbertForPreTraining, FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertForQuestionAnswering, ) if is_flax_available() else () ) def lowerCAmelCase_ ( self ) -> Any: snake_case__ :Union[str, Any] = FlaxAlbertModelTester(self ) @slow def lowerCAmelCase_ ( self ) -> str: for model_class_name in self.all_model_classes: snake_case__ :Union[str, Any] = model_class_name.from_pretrained("albert-base-v2" ) snake_case__ :Any = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCamelCase ) @require_flax class _snake_case ( unittest.TestCase ): @slow def lowerCAmelCase_ ( self ) -> int: snake_case__ :Optional[int] = FlaxAlbertModel.from_pretrained("albert-base-v2" ) snake_case__ :Tuple = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] ) snake_case__ :List[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) snake_case__ :Tuple = model(UpperCamelCase ,attention_mask=UpperCamelCase )[0] snake_case__ :Optional[int] = (1, 11, 768) self.assertEqual(output.shape ,UpperCamelCase ) snake_case__ :Tuple = np.array( [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] ,UpperCamelCase ,atol=1E-4 ) )
57
def lowercase_ ( __snake_case : Tuple , __snake_case : Optional[int] ) -> List[Any]: '''simple docstring''' snake_case__ :Dict = "" for i in table: res += inp[i - 1] return res def lowercase_ ( __snake_case : List[str] ) -> int: '''simple docstring''' return data[1:] + data[0] def lowercase_ ( __snake_case : int , __snake_case : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' snake_case__ :Union[str, Any] = "" for i in range(len(__snake_case ) ): if a[i] == b[i]: res += "0" else: res += "1" return res def lowercase_ ( __snake_case : Optional[int] , __snake_case : Dict ) -> Union[str, Any]: '''simple docstring''' snake_case__ :int = int("0b" + data[0] + data[-1] , 2 ) snake_case__ :Union[str, Any] = int("0b" + data[1:3] , 2 ) return bin(s[row][col] )[2:] def lowercase_ ( __snake_case : Dict , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : List[Any] , __snake_case : Optional[int] ) -> List[str]: '''simple docstring''' snake_case__ :Tuple = message[:4] snake_case__ :int = message[4:] snake_case__ :int = apply_table(__snake_case , __snake_case ) snake_case__ :Union[str, Any] = xor(__snake_case , __snake_case ) snake_case__ :Tuple = apply_sbox(__snake_case , temp[:4] ) # noqa: E741 snake_case__ :List[str] = apply_sbox(__snake_case , temp[4:] ) snake_case__ :int = "0" * (2 - len(__snake_case )) + l # noqa: E741 snake_case__ :int = "0" * (2 - len(__snake_case )) + r snake_case__ :Optional[Any] = apply_table(l + r , __snake_case ) snake_case__ :Tuple = xor(__snake_case , __snake_case ) return temp + right if __name__ == "__main__": __UpperCAmelCase : Dict = input("Enter 10 bit key: ") __UpperCAmelCase : Tuple = input("Enter 8 bit message: ") __UpperCAmelCase : Any = [6, 3, 7, 4, 8, 5, 1_0, 9] __UpperCAmelCase : List[str] = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6] __UpperCAmelCase : Tuple = [2, 4, 3, 1] __UpperCAmelCase : List[Any] = [2, 6, 3, 1, 4, 8, 5, 7] __UpperCAmelCase : Optional[Any] = [4, 1, 3, 5, 7, 2, 8, 6] __UpperCAmelCase : Optional[int] = [4, 1, 2, 3, 2, 3, 4, 1] __UpperCAmelCase : List[Any] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]] __UpperCAmelCase : Union[str, Any] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]] # key generation __UpperCAmelCase : int = apply_table(key, paa_table) __UpperCAmelCase : Dict = temp[:5] __UpperCAmelCase : Optional[int] = temp[5:] __UpperCAmelCase : Optional[int] = left_shift(left) __UpperCAmelCase : Union[str, Any] = left_shift(right) __UpperCAmelCase : int = apply_table(left + right, pa_table) __UpperCAmelCase : Tuple = left_shift(left) __UpperCAmelCase : Union[str, Any] = left_shift(right) __UpperCAmelCase : Dict = left_shift(left) __UpperCAmelCase : Optional[Any] = left_shift(right) __UpperCAmelCase : Optional[int] = apply_table(left + right, pa_table) # encryption __UpperCAmelCase : Tuple = apply_table(message, IP) __UpperCAmelCase : Tuple = function(expansion, sa, sa, keya, temp) __UpperCAmelCase : List[Any] = temp[4:] + temp[:4] __UpperCAmelCase : int = function(expansion, sa, sa, keya, temp) __UpperCAmelCase : Union[str, Any] = apply_table(temp, IP_inv) print("Cipher text is:", CT) # decryption __UpperCAmelCase : List[Any] = apply_table(CT, IP) __UpperCAmelCase : List[Any] = function(expansion, sa, sa, keya, temp) __UpperCAmelCase : int = temp[4:] + temp[:4] __UpperCAmelCase : Union[str, Any] = function(expansion, sa, sa, keya, temp) __UpperCAmelCase : Union[str, Any] = apply_table(temp, IP_inv) print("Plain text after decypting is:", PT)
57
1
def lowercase_ ( __snake_case : int ) -> int: '''simple docstring''' if n == 1 or not isinstance(__snake_case , __snake_case ): return 0 elif n == 2: return 1 else: snake_case__ :Any = [0, 1] for i in range(2 , n + 1 ): sequence.append(sequence[i - 1] + sequence[i - 2] ) return sequence[n] def lowercase_ ( __snake_case : int ) -> int: '''simple docstring''' snake_case__ :List[str] = 0 snake_case__ :Dict = 2 while digits < n: index += 1 snake_case__ :List[str] = len(str(fibonacci(__snake_case ) ) ) return index def lowercase_ ( __snake_case : int = 10_00 ) -> int: '''simple docstring''' return fibonacci_digits_index(__snake_case ) if __name__ == "__main__": print(solution(int(str(input()).strip())))
57
import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class _snake_case ( _A , _A , _A ): @register_to_config def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = False ,) -> int: super().__init__() snake_case__ :Union[str, Any] = nn.Embedding(UpperCamelCase ,UpperCamelCase ) snake_case__ :int = nn.Embedding(UpperCamelCase ,UpperCamelCase ) snake_case__ :Any = False snake_case__ :List[Any] = nn.Dropout(p=UpperCamelCase ) snake_case__ :Tuple = TaConfig( vocab_size=UpperCamelCase ,d_model=UpperCamelCase ,num_heads=UpperCamelCase ,d_kv=UpperCamelCase ,d_ff=UpperCamelCase ,dropout_rate=UpperCamelCase ,feed_forward_proj=UpperCamelCase ,is_decoder=UpperCamelCase ,is_encoder_decoder=UpperCamelCase ,) snake_case__ :List[str] = nn.ModuleList() for lyr_num in range(UpperCamelCase ): snake_case__ :List[Any] = TaBlock(UpperCamelCase ) self.encoders.append(UpperCamelCase ) snake_case__ :Optional[Any] = TaLayerNorm(UpperCamelCase ) snake_case__ :Any = nn.Dropout(p=UpperCamelCase ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> int: snake_case__ :str = self.token_embedder(UpperCamelCase ) snake_case__ :int = encoder_input_tokens.shape[1] snake_case__ :List[Any] = torch.arange(UpperCamelCase ,device=encoder_input_tokens.device ) x += self.position_encoding(UpperCamelCase ) snake_case__ :Optional[int] = self.dropout_pre(UpperCamelCase ) # inverted the attention mask snake_case__ :Optional[Any] = encoder_input_tokens.size() snake_case__ :Dict = self.get_extended_attention_mask(UpperCamelCase ,UpperCamelCase ) for lyr in self.encoders: snake_case__ :str = lyr(UpperCamelCase ,UpperCamelCase )[0] snake_case__ :List[Any] = self.layer_norm(UpperCamelCase ) return self.dropout_post(UpperCamelCase ), encoder_inputs_mask
57
1
def lowercase_ ( ) -> Dict: '''simple docstring''' for n in range(1 , 1_00_00_00 ): yield n * (n + 1) // 2 def lowercase_ ( __snake_case : List[Any] ) -> List[str]: '''simple docstring''' snake_case__ :Dict = 1 snake_case__ :Tuple = 2 while i * i <= n: snake_case__ :str = 0 while n % i == 0: n //= i multiplicity += 1 divisors_count *= multiplicity + 1 i += 1 if n > 1: divisors_count *= 2 return divisors_count def lowercase_ ( ) -> Dict: '''simple docstring''' return next(i for i in triangle_number_generator() if count_divisors(__snake_case ) > 5_00 ) if __name__ == "__main__": print(solution())
57
__UpperCAmelCase : int = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []} __UpperCAmelCase : List[str] = ["a", "b", "c", "d", "e"] def lowercase_ ( __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : Tuple ) -> Optional[int]: '''simple docstring''' snake_case__ :List[Any] = start # add current to visited visited.append(__snake_case ) snake_case__ :List[str] = edges[current] for neighbor in neighbors: # if neighbor not in visited, visit if neighbor not in visited: snake_case__ :Any = topological_sort(__snake_case , __snake_case , __snake_case ) # if all neighbors visited add current to sort sort.append(__snake_case ) # if all vertices haven't been visited select a new one to visit if len(__snake_case ) != len(__snake_case ): for vertice in vertices: if vertice not in visited: snake_case__ :Any = topological_sort(__snake_case , __snake_case , __snake_case ) # return sort return sort if __name__ == "__main__": __UpperCAmelCase : Tuple = topological_sort("a", [], []) print(sort)
57
1
import math def lowercase_ ( __snake_case : int ) -> bool: '''simple docstring''' return math.sqrt(__snake_case ) * math.sqrt(__snake_case ) == num def lowercase_ ( __snake_case : int ) -> bool: '''simple docstring''' snake_case__ :str = 0 snake_case__ :str = n while left <= right: snake_case__ :Optional[int] = (left + right) // 2 if mid**2 == n: return True elif mid**2 > n: snake_case__ :Union[str, Any] = mid - 1 else: snake_case__ :Union[str, Any] = mid + 1 return False if __name__ == "__main__": import doctest doctest.testmod()
57
import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class _snake_case ( unittest.TestCase ): def lowerCAmelCase_ ( self ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() def lowerCAmelCase_ ( self ) -> str: snake_case__ , snake_case__ :Tuple = FlaxControlNetModel.from_pretrained( "lllyasviel/sd-controlnet-canny" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa ) snake_case__ , snake_case__ :Any = FlaxStableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa ) snake_case__ :List[str] = controlnet_params snake_case__ :Union[str, Any] = "bird" snake_case__ :Optional[int] = jax.device_count() snake_case__ :Tuple = pipe.prepare_text_inputs([prompts] * num_samples ) snake_case__ :Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) snake_case__ :str = pipe.prepare_image_inputs([canny_image] * num_samples ) snake_case__ :List[str] = jax.random.PRNGKey(0 ) snake_case__ :str = jax.random.split(UpperCamelCase ,jax.device_count() ) snake_case__ :int = replicate(UpperCamelCase ) snake_case__ :Any = shard(UpperCamelCase ) snake_case__ :Any = shard(UpperCamelCase ) snake_case__ :str = pipe( prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) snake_case__ :List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) snake_case__ :Any = images[0, 253:256, 253:256, -1] snake_case__ :Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case__ :List[Any] = jnp.array( [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] ) print(f'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def lowerCAmelCase_ ( self ) -> Optional[int]: snake_case__ , snake_case__ :List[str] = FlaxControlNetModel.from_pretrained( "lllyasviel/sd-controlnet-openpose" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa ) snake_case__ , snake_case__ :Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa ) snake_case__ :str = controlnet_params snake_case__ :int = "Chef in the kitchen" snake_case__ :List[Any] = jax.device_count() snake_case__ :Dict = pipe.prepare_text_inputs([prompts] * num_samples ) snake_case__ :Any = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" ) snake_case__ :Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples ) snake_case__ :List[str] = jax.random.PRNGKey(0 ) snake_case__ :Any = jax.random.split(UpperCamelCase ,jax.device_count() ) snake_case__ :Dict = replicate(UpperCamelCase ) snake_case__ :Tuple = shard(UpperCamelCase ) snake_case__ :Optional[int] = shard(UpperCamelCase ) snake_case__ :Optional[Any] = pipe( prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) snake_case__ :int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) snake_case__ :List[str] = images[0, 253:256, 253:256, -1] snake_case__ :Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case__ :List[str] = jnp.array( [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] ) print(f'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
57
1
import tensorflow as tf from ...tf_utils import shape_list class _snake_case ( tf.keras.layers.Layer ): def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=1 ,UpperCamelCase=False ,**UpperCamelCase ) -> Any: super().__init__(**UpperCamelCase ) snake_case__ :List[Any] = vocab_size snake_case__ :Any = d_embed snake_case__ :Tuple = d_proj snake_case__ :Any = cutoffs + [vocab_size] snake_case__ :str = [0] + self.cutoffs snake_case__ :str = div_val snake_case__ :Dict = self.cutoffs[0] snake_case__ :Tuple = len(self.cutoffs ) - 1 snake_case__ :int = self.shortlist_size + self.n_clusters snake_case__ :List[str] = keep_order snake_case__ :List[Any] = [] snake_case__ :Any = [] def lowerCAmelCase_ ( self ,UpperCamelCase ) -> List[str]: if self.n_clusters > 0: snake_case__ :int = self.add_weight( shape=(self.n_clusters, self.d_embed) ,initializer="zeros" ,trainable=UpperCamelCase ,name="cluster_weight" ) snake_case__ :str = self.add_weight( shape=(self.n_clusters,) ,initializer="zeros" ,trainable=UpperCamelCase ,name="cluster_bias" ) if self.div_val == 1: for i in range(len(self.cutoffs ) ): if self.d_proj != self.d_embed: snake_case__ :Optional[int] = self.add_weight( shape=(self.d_embed, self.d_proj) ,initializer="zeros" ,trainable=UpperCamelCase ,name=f'out_projs_._{i}' ,) self.out_projs.append(UpperCamelCase ) else: self.out_projs.append(UpperCamelCase ) snake_case__ :str = self.add_weight( shape=(self.vocab_size, self.d_embed) ,initializer="zeros" ,trainable=UpperCamelCase ,name=f'out_layers_._{i}_._weight' ,) snake_case__ :Tuple = self.add_weight( shape=(self.vocab_size,) ,initializer="zeros" ,trainable=UpperCamelCase ,name=f'out_layers_._{i}_._bias' ,) self.out_layers.append((weight, bias) ) else: for i in range(len(self.cutoffs ) ): snake_case__ , snake_case__ :List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1] snake_case__ :Optional[Any] = self.d_embed // (self.div_val**i) snake_case__ :List[str] = self.add_weight( shape=(d_emb_i, self.d_proj) ,initializer="zeros" ,trainable=UpperCamelCase ,name=f'out_projs_._{i}' ) self.out_projs.append(UpperCamelCase ) snake_case__ :List[Any] = self.add_weight( shape=(r_idx - l_idx, d_emb_i) ,initializer="zeros" ,trainable=UpperCamelCase ,name=f'out_layers_._{i}_._weight' ,) snake_case__ :List[Any] = self.add_weight( shape=(r_idx - l_idx,) ,initializer="zeros" ,trainable=UpperCamelCase ,name=f'out_layers_._{i}_._bias' ,) self.out_layers.append((weight, bias) ) super().build(UpperCamelCase ) @staticmethod def lowerCAmelCase_ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ) -> str: snake_case__ :Any = x if proj is not None: snake_case__ :List[Any] = tf.einsum("ibd,ed->ibe" ,UpperCamelCase ,UpperCamelCase ) return tf.einsum("ibd,nd->ibn" ,UpperCamelCase ,UpperCamelCase ) + b @staticmethod def lowerCAmelCase_ ( UpperCamelCase ,UpperCamelCase ) -> Optional[Any]: snake_case__ :int = shape_list(UpperCamelCase ) snake_case__ :Union[str, Any] = tf.range(lp_size[0] ,dtype=target.dtype ) snake_case__ :Tuple = tf.stack([r, target] ,1 ) return tf.gather_nd(UpperCamelCase ,UpperCamelCase ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=True ,UpperCamelCase=False ) -> str: snake_case__ :int = 0 if self.n_clusters == 0: snake_case__ :List[str] = self._logit(UpperCamelCase ,self.out_layers[0][0] ,self.out_layers[0][1] ,self.out_projs[0] ) if target is not None: snake_case__ :Union[str, Any] = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=UpperCamelCase ,logits=UpperCamelCase ) snake_case__ :int = tf.nn.log_softmax(UpperCamelCase ,axis=-1 ) else: snake_case__ :Optional[int] = shape_list(UpperCamelCase ) snake_case__ :str = [] snake_case__ :int = tf.zeros(hidden_sizes[:2] ) for i in range(len(self.cutoffs ) ): snake_case__ , snake_case__ :List[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1] if target is not None: snake_case__ :Optional[int] = (target >= l_idx) & (target < r_idx) snake_case__ :Any = tf.where(UpperCamelCase ) snake_case__ :List[Any] = tf.boolean_mask(UpperCamelCase ,UpperCamelCase ) - l_idx if self.div_val == 1: snake_case__ :Union[str, Any] = self.out_layers[0][0][l_idx:r_idx] snake_case__ :int = self.out_layers[0][1][l_idx:r_idx] else: snake_case__ :int = self.out_layers[i][0] snake_case__ :str = self.out_layers[i][1] if i == 0: snake_case__ :Optional[int] = tf.concat([cur_W, self.cluster_weight] ,0 ) snake_case__ :Union[str, Any] = tf.concat([cur_b, self.cluster_bias] ,0 ) snake_case__ :Optional[Any] = self._logit(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,self.out_projs[0] ) snake_case__ :Optional[Any] = tf.nn.log_softmax(UpperCamelCase ) out.append(head_logprob[..., : self.cutoffs[0]] ) if target is not None: snake_case__ :int = tf.boolean_mask(UpperCamelCase ,UpperCamelCase ) snake_case__ :Optional[Any] = self._gather_logprob(UpperCamelCase ,UpperCamelCase ) else: snake_case__ :Any = self._logit(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,self.out_projs[i] ) snake_case__ :Union[str, Any] = tf.nn.log_softmax(UpperCamelCase ) snake_case__ :str = self.cutoffs[0] + i - 1 # No probability for the head cluster snake_case__ :Dict = head_logprob[..., cluster_prob_idx, None] + tail_logprob out.append(UpperCamelCase ) if target is not None: snake_case__ :Union[str, Any] = tf.boolean_mask(UpperCamelCase ,UpperCamelCase ) snake_case__ :Any = tf.boolean_mask(UpperCamelCase ,UpperCamelCase ) snake_case__ :str = self._gather_logprob(UpperCamelCase ,UpperCamelCase ) cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1] if target is not None: loss += tf.scatter_nd(UpperCamelCase ,-cur_logprob ,shape_list(UpperCamelCase ) ) snake_case__ :List[str] = tf.concat(UpperCamelCase ,axis=-1 ) if target is not None: if return_mean: snake_case__ :int = tf.reduce_mean(UpperCamelCase ) # Add the training-time loss value to the layer using `self.add_loss()`. self.add_loss(UpperCamelCase ) # Log the loss as a metric (we could log arbitrary metrics, # including different metrics for training and inference. self.add_metric(UpperCamelCase ,name=self.name ,aggregation="mean" if return_mean else "" ) return out
57
def lowercase_ ( __snake_case : list ) -> list: '''simple docstring''' if any(not isinstance(__snake_case , __snake_case ) or x < 0 for x in sequence ): raise TypeError("Sequence must be list of non-negative integers" ) for _ in range(len(__snake_case ) ): for i, (rod_upper, rod_lower) in enumerate(zip(__snake_case , sequence[1:] ) ): if rod_upper > rod_lower: sequence[i] -= rod_upper - rod_lower sequence[i + 1] += rod_upper - rod_lower return sequence if __name__ == "__main__": assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
57
1
from math import log from scipy.constants import Boltzmann, physical_constants __UpperCAmelCase : Union[str, Any] = 3_0_0 # TEMPERATURE (unit = K) def lowercase_ ( __snake_case : float , __snake_case : float , __snake_case : float , ) -> float: '''simple docstring''' if donor_conc <= 0: raise ValueError("Donor concentration should be positive" ) elif acceptor_conc <= 0: raise ValueError("Acceptor concentration should be positive" ) elif intrinsic_conc <= 0: raise ValueError("Intrinsic concentration should be positive" ) elif donor_conc <= intrinsic_conc: raise ValueError( "Donor concentration should be greater than intrinsic concentration" ) elif acceptor_conc <= intrinsic_conc: raise ValueError( "Acceptor concentration should be greater than intrinsic concentration" ) else: return ( Boltzmann * T * log((donor_conc * acceptor_conc) / intrinsic_conc**2 ) / physical_constants["electron volt"][0] ) if __name__ == "__main__": import doctest doctest.testmod()
57
from __future__ import annotations def lowercase_ ( __snake_case : list ) -> float: '''simple docstring''' if not nums: raise ValueError("List is empty" ) return sum(__snake_case ) / len(__snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
57
1
class _snake_case : def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Dict: snake_case__ :List[str] = name snake_case__ :Any = value snake_case__ :Optional[int] = weight def __repr__( self ) -> Any: return f'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})' def lowerCAmelCase_ ( self ) -> Union[str, Any]: return self.value def lowerCAmelCase_ ( self ) -> Optional[Any]: return self.name def lowerCAmelCase_ ( self ) -> Optional[Any]: return self.weight def lowerCAmelCase_ ( self ) -> Dict: return self.value / self.weight def lowercase_ ( __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : str ) -> str: '''simple docstring''' snake_case__ :List[str] = [] for i in range(len(__snake_case ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def lowercase_ ( __snake_case : List[str] , __snake_case : Tuple , __snake_case : Optional[int] ) -> Optional[Any]: '''simple docstring''' snake_case__ :Union[str, Any] = sorted(__snake_case , key=__snake_case , reverse=__snake_case ) snake_case__ :Optional[int] = [] snake_case__ , snake_case__ :Tuple = 0.0, 0.0 for i in range(len(__snake_case ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def lowercase_ ( ) -> Dict: '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
57
from __future__ import annotations import math def lowercase_ ( __snake_case : int , __snake_case : int , __snake_case : bool , __snake_case : list[int] , __snake_case : float ) -> int: '''simple docstring''' if depth < 0: raise ValueError("Depth cannot be less than 0" ) if len(__snake_case ) == 0: raise ValueError("Scores cannot be empty" ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) , ) return min( minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) , ) def lowercase_ ( ) -> None: '''simple docstring''' snake_case__ :List[Any] = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23] snake_case__ :int = math.log(len(__snake_case ) , 2 ) print("Optimal value : " , end="" ) print(minimax(0 , 0 , __snake_case , __snake_case , __snake_case ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
57
1
from __future__ import annotations def lowercase_ ( __snake_case : int | float | str , __snake_case : int | float | str ) -> list[str]: '''simple docstring''' if nth_term == "": return [""] snake_case__ :Any = int(__snake_case ) snake_case__ :Union[str, Any] = int(__snake_case ) snake_case__ :list[str] = [] for temp in range(int(__snake_case ) ): series.append(F'1 / {pow(temp + 1 , int(__snake_case ) )}' if series else "1" ) return series if __name__ == "__main__": import doctest doctest.testmod() __UpperCAmelCase : Union[str, Any] = int(input("Enter the last number (nth term) of the P-Series")) __UpperCAmelCase : Optional[Any] = int(input("Enter the power for P-Series")) print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p") print(p_series(nth_term, power))
57
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __UpperCAmelCase : Optional[Any] = logging.get_logger(__name__) def lowercase_ ( __snake_case : Any , __snake_case : Any ) -> Any: '''simple docstring''' snake_case__ :Optional[Any] = b.T snake_case__ :Optional[Any] = np.sum(np.square(__snake_case ) , axis=1 ) snake_case__ :Tuple = np.sum(np.square(__snake_case ) , axis=0 ) snake_case__ :Union[str, Any] = np.matmul(__snake_case , __snake_case ) snake_case__ :Union[str, Any] = aa[:, None] - 2 * ab + ba[None, :] return d def lowercase_ ( __snake_case : Optional[Any] , __snake_case : int ) -> Any: '''simple docstring''' snake_case__ :Optional[Any] = x.reshape(-1 , 3 ) snake_case__ :List[str] = squared_euclidean_distance(__snake_case , __snake_case ) return np.argmin(__snake_case , axis=1 ) class _snake_case ( _A ): _A = ['pixel_values'] def __init__( self ,UpperCamelCase = None ,UpperCamelCase = True ,UpperCamelCase = None ,UpperCamelCase = PILImageResampling.BILINEAR ,UpperCamelCase = True ,UpperCamelCase = True ,**UpperCamelCase ,) -> None: super().__init__(**UpperCamelCase ) snake_case__ :List[Any] = size if size is not None else {"height": 256, "width": 256} snake_case__ :str = get_size_dict(UpperCamelCase ) snake_case__ :Dict = np.array(UpperCamelCase ) if clusters is not None else None snake_case__ :str = do_resize snake_case__ :List[str] = size snake_case__ :List[Any] = resample snake_case__ :Union[str, Any] = do_normalize snake_case__ :int = do_color_quantize def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = PILImageResampling.BILINEAR ,UpperCamelCase = None ,**UpperCamelCase ,) -> np.ndarray: snake_case__ :List[str] = get_size_dict(UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}' ) return resize( UpperCamelCase ,size=(size["height"], size["width"]) ,resample=UpperCamelCase ,data_format=UpperCamelCase ,**UpperCamelCase ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,) -> np.ndarray: snake_case__ :Tuple = rescale(image=UpperCamelCase ,scale=1 / 127.5 ,data_format=UpperCamelCase ) snake_case__ :List[Any] = image - 1 return image def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = ChannelDimension.FIRST ,**UpperCamelCase ,) -> PIL.Image.Image: snake_case__ :Optional[int] = do_resize if do_resize is not None else self.do_resize snake_case__ :int = size if size is not None else self.size snake_case__ :Tuple = get_size_dict(UpperCamelCase ) snake_case__ :str = resample if resample is not None else self.resample snake_case__ :Dict = do_normalize if do_normalize is not None else self.do_normalize snake_case__ :Tuple = do_color_quantize if do_color_quantize is not None else self.do_color_quantize snake_case__ :List[Any] = clusters if clusters is not None else self.clusters snake_case__ :str = np.array(UpperCamelCase ) snake_case__ :int = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_color_quantize and clusters is None: raise ValueError("Clusters must be specified if do_color_quantize is True." ) # All transformations expect numpy arrays. snake_case__ :Union[str, Any] = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: snake_case__ :int = [self.resize(image=UpperCamelCase ,size=UpperCamelCase ,resample=UpperCamelCase ) for image in images] if do_normalize: snake_case__ :Any = [self.normalize(image=UpperCamelCase ) for image in images] if do_color_quantize: snake_case__ :Optional[Any] = [to_channel_dimension_format(UpperCamelCase ,ChannelDimension.LAST ) for image in images] # color quantize from (batch_size, height, width, 3) to (batch_size, height, width) snake_case__ :Union[str, Any] = np.array(UpperCamelCase ) snake_case__ :Optional[int] = color_quantize(UpperCamelCase ,UpperCamelCase ).reshape(images.shape[:-1] ) # flatten to (batch_size, height*width) snake_case__ :List[Any] = images.shape[0] snake_case__ :str = images.reshape(UpperCamelCase ,-1 ) # We need to convert back to a list of images to keep consistent behaviour across processors. snake_case__ :Any = list(UpperCamelCase ) else: snake_case__ :List[str] = [to_channel_dimension_format(UpperCamelCase ,UpperCamelCase ) for image in images] snake_case__ :List[str] = {"input_ids": images} return BatchFeature(data=UpperCamelCase ,tensor_type=UpperCamelCase )
57
1
from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase : Dict = logging.get_logger(__name__) __UpperCAmelCase : List[str] = { "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json", } class _snake_case ( _A ): _A = 'open-llama' def __init__( self ,UpperCamelCase=100_000 ,UpperCamelCase=4_096 ,UpperCamelCase=11_008 ,UpperCamelCase=32 ,UpperCamelCase=32 ,UpperCamelCase="silu" ,UpperCamelCase=2_048 ,UpperCamelCase=0.02 ,UpperCamelCase=1E-6 ,UpperCamelCase=True ,UpperCamelCase=0 ,UpperCamelCase=1 ,UpperCamelCase=2 ,UpperCamelCase=False ,UpperCamelCase=True ,UpperCamelCase=0.1 ,UpperCamelCase=0.1 ,UpperCamelCase=True ,UpperCamelCase=True ,UpperCamelCase=None ,**UpperCamelCase ,) -> str: snake_case__ :str = vocab_size snake_case__ :Union[str, Any] = max_position_embeddings snake_case__ :Union[str, Any] = hidden_size snake_case__ :str = intermediate_size snake_case__ :List[Any] = num_hidden_layers snake_case__ :Tuple = num_attention_heads snake_case__ :List[str] = hidden_act snake_case__ :str = initializer_range snake_case__ :Optional[Any] = rms_norm_eps snake_case__ :Union[str, Any] = use_cache snake_case__ :Optional[Any] = kwargs.pop( "use_memorry_efficient_attention" ,UpperCamelCase ) snake_case__ :Optional[int] = hidden_dropout_prob snake_case__ :str = attention_dropout_prob snake_case__ :List[Any] = use_stable_embedding snake_case__ :Optional[int] = shared_input_output_embedding snake_case__ :Tuple = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=UpperCamelCase ,bos_token_id=UpperCamelCase ,eos_token_id=UpperCamelCase ,tie_word_embeddings=UpperCamelCase ,**UpperCamelCase ,) def lowerCAmelCase_ ( self ) -> Dict: if self.rope_scaling is None: return if not isinstance(self.rope_scaling ,UpperCamelCase ) or len(self.rope_scaling ) != 2: raise ValueError( "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, " f'got {self.rope_scaling}' ) snake_case__ :int = self.rope_scaling.get("type" ,UpperCamelCase ) snake_case__ :List[Any] = self.rope_scaling.get("factor" ,UpperCamelCase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' ) if rope_scaling_factor is None or not isinstance(UpperCamelCase ,UpperCamelCase ) or rope_scaling_factor <= 1.0: raise ValueError(f'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}' )
57
import pytest __UpperCAmelCase : int = "__dummy_dataset1__" __UpperCAmelCase : int = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n" @pytest.fixture def lowercase_ ( ) -> Optional[Any]: '''simple docstring''' return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def lowercase_ ( ) -> Optional[int]: '''simple docstring''' return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def lowercase_ ( __snake_case : Optional[int] , __snake_case : List[Any] , __snake_case : Any ) -> Dict: '''simple docstring''' snake_case__ :Optional[Any] = dataset_loading_script_name snake_case__ :Optional[Any] = tmp_path / "datasets" / script_name script_dir.mkdir(parents=__snake_case ) snake_case__ :List[Any] = script_dir / F'{script_name}.py' with open(__snake_case , "w" ) as f: f.write(__snake_case ) return str(__snake_case )
57
1
from collections.abc import Sequence from queue import Queue class _snake_case : def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ,UpperCamelCase=None ) -> Tuple: snake_case__ :Any = start snake_case__ :int = end snake_case__ :str = val snake_case__ :Union[str, Any] = (start + end) // 2 snake_case__ :List[Any] = left snake_case__ :int = right def __repr__( self ) -> int: return f'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})' class _snake_case : def __init__( self ,UpperCamelCase ,UpperCamelCase ) -> Optional[int]: snake_case__ :List[Any] = collection snake_case__ :str = function if self.collection: snake_case__ :Any = self._build_tree(0 ,len(UpperCamelCase ) - 1 ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> List[str]: self._update_tree(self.root ,UpperCamelCase ,UpperCamelCase ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> Optional[int]: return self._query_range(self.root ,UpperCamelCase ,UpperCamelCase ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> int: if start == end: return SegmentTreeNode(UpperCamelCase ,UpperCamelCase ,self.collection[start] ) snake_case__ :List[Any] = (start + end) // 2 snake_case__ :Dict = self._build_tree(UpperCamelCase ,UpperCamelCase ) snake_case__ :str = self._build_tree(mid + 1 ,UpperCamelCase ) return SegmentTreeNode(UpperCamelCase ,UpperCamelCase ,self.fn(left.val ,right.val ) ,UpperCamelCase ,UpperCamelCase ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]: if node.start == i and node.end == i: snake_case__ :Optional[Any] = val return if i <= node.mid: self._update_tree(node.left ,UpperCamelCase ,UpperCamelCase ) else: self._update_tree(node.right ,UpperCamelCase ,UpperCamelCase ) snake_case__ :Tuple = self.fn(node.left.val ,node.right.val ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]: if node.start == i and node.end == j: return node.val if i <= node.mid: if j <= node.mid: # range in left child tree return self._query_range(node.left ,UpperCamelCase ,UpperCamelCase ) else: # range in left child tree and right child tree return self.fn( self._query_range(node.left ,UpperCamelCase ,node.mid ) ,self._query_range(node.right ,node.mid + 1 ,UpperCamelCase ) ,) else: # range in right child tree return self._query_range(node.right ,UpperCamelCase ,UpperCamelCase ) def lowerCAmelCase_ ( self ) -> Optional[Any]: if self.root is not None: snake_case__ :Optional[int] = Queue() queue.put(self.root ) while not queue.empty(): snake_case__ :Tuple = queue.get() yield node if node.left is not None: queue.put(node.left ) if node.right is not None: queue.put(node.right ) if __name__ == "__main__": import operator for fn in [operator.add, max, min]: print("*" * 5_0) __UpperCAmelCase : Dict = SegmentTree([2, 1, 5, 3, 4], fn) for node in arr.traverse(): print(node) print() arr.update(1, 5) for node in arr.traverse(): print(node) print() print(arr.query_range(3, 4)) # 7 print(arr.query_range(2, 2)) # 5 print(arr.query_range(1, 3)) # 13 print()
57
from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
57
1
import argparse import torch from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert from transformers.utils import logging logging.set_verbosity_info() def lowercase_ ( __snake_case : Tuple , __snake_case : Dict , __snake_case : Any ) -> List[str]: '''simple docstring''' snake_case__ :Union[str, Any] = MobileBertConfig.from_json_file(__snake_case ) print(F'Building PyTorch model from configuration: {config}' ) snake_case__ :Tuple = MobileBertForPreTraining(__snake_case ) # Load weights from tf checkpoint snake_case__ :Union[str, Any] = load_tf_weights_in_mobilebert(__snake_case , __snake_case , __snake_case ) # Save pytorch-model print(F'Save PyTorch model to {pytorch_dump_path}' ) torch.save(model.state_dict() , __snake_case ) if __name__ == "__main__": __UpperCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--mobilebert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained MobileBERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) __UpperCAmelCase : Dict = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
57
import json import os import shutil import warnings from argparse import ArgumentParser, Namespace from pathlib import Path from typing import List from ..utils import logging from . import BaseTransformersCLICommand try: from cookiecutter.main import cookiecutter __UpperCAmelCase : Dict = True except ImportError: __UpperCAmelCase : List[Any] = False __UpperCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name def lowercase_ ( __snake_case : Namespace ) -> Dict: '''simple docstring''' return AddNewModelCommand(args.testing , args.testing_file , path=args.path ) class _snake_case ( _A ): @staticmethod def lowerCAmelCase_ ( UpperCamelCase ) -> Any: snake_case__ :Dict = parser.add_parser("add-new-model" ) add_new_model_parser.add_argument("--testing" ,action="store_true" ,help="If in testing mode." ) add_new_model_parser.add_argument("--testing_file" ,type=UpperCamelCase ,help="Configuration file on which to run." ) add_new_model_parser.add_argument( "--path" ,type=UpperCamelCase ,help="Path to cookiecutter. Should only be used for testing purposes." ) add_new_model_parser.set_defaults(func=UpperCamelCase ) def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ,*UpperCamelCase ) -> Any: snake_case__ :Union[str, Any] = testing snake_case__ :Union[str, Any] = testing_file snake_case__ :List[str] = path def lowerCAmelCase_ ( self ) -> List[Any]: warnings.warn( "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. " "It is not actively maintained anymore, so might give a result that won't pass all tests and quality " "checks, you should use `transformers-cli add-new-model-like` instead." ) if not _has_cookiecutter: raise ImportError( "Model creation dependencies are required to use the `add_new_model` command. Install them by running " "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" ) # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory snake_case__ :Tuple = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]] if len(UpperCamelCase ) > 0: raise ValueError( "Several directories starting with `cookiecutter-template-` in current working directory. " "Please clean your directory by removing all folders starting with `cookiecutter-template-` or " "change your working directory." ) snake_case__ :str = ( Path(UpperCamelCase ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent ) snake_case__ :Tuple = path_to_transformer_root / "templates" / "adding_a_new_model" # Execute cookiecutter if not self._testing: cookiecutter(str(UpperCamelCase ) ) else: with open(self._testing_file ,"r" ) as configuration_file: snake_case__ :str = json.load(UpperCamelCase ) cookiecutter( str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=UpperCamelCase ,extra_context=UpperCamelCase ,) snake_case__ :List[Any] = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0] # Retrieve configuration with open(directory + "/configuration.json" ,"r" ) as configuration_file: snake_case__ :Dict = json.load(UpperCamelCase ) snake_case__ :Optional[Any] = configuration["lowercase_modelname"] snake_case__ :List[Any] = configuration["generate_tensorflow_pytorch_and_flax"] os.remove(f'{directory}/configuration.json' ) snake_case__ :Any = "PyTorch" in generate_tensorflow_pytorch_and_flax snake_case__ :Any = "TensorFlow" in generate_tensorflow_pytorch_and_flax snake_case__ :Any = "Flax" in generate_tensorflow_pytorch_and_flax snake_case__ :Dict = f'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}' os.makedirs(UpperCamelCase ,exist_ok=UpperCamelCase ) os.makedirs(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=UpperCamelCase ) # Tests require submodules as they have parent imports with open(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,"w" ): pass shutil.move( f'{directory}/__init__.py' ,f'{model_dir}/__init__.py' ,) shutil.move( f'{directory}/configuration_{lowercase_model_name}.py' ,f'{model_dir}/configuration_{lowercase_model_name}.py' ,) def remove_copy_lines(UpperCamelCase ): with open(UpperCamelCase ,"r" ) as f: snake_case__ :List[str] = f.readlines() with open(UpperCamelCase ,"w" ) as f: for line in lines: if "# Copied from transformers." not in line: f.write(UpperCamelCase ) if output_pytorch: if not self._testing: remove_copy_lines(f'{directory}/modeling_{lowercase_model_name}.py' ) shutil.move( f'{directory}/modeling_{lowercase_model_name}.py' ,f'{model_dir}/modeling_{lowercase_model_name}.py' ,) shutil.move( f'{directory}/test_modeling_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,) else: os.remove(f'{directory}/modeling_{lowercase_model_name}.py' ) os.remove(f'{directory}/test_modeling_{lowercase_model_name}.py' ) if output_tensorflow: if not self._testing: remove_copy_lines(f'{directory}/modeling_tf_{lowercase_model_name}.py' ) shutil.move( f'{directory}/modeling_tf_{lowercase_model_name}.py' ,f'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,) shutil.move( f'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,) else: os.remove(f'{directory}/modeling_tf_{lowercase_model_name}.py' ) os.remove(f'{directory}/test_modeling_tf_{lowercase_model_name}.py' ) if output_flax: if not self._testing: remove_copy_lines(f'{directory}/modeling_flax_{lowercase_model_name}.py' ) shutil.move( f'{directory}/modeling_flax_{lowercase_model_name}.py' ,f'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,) shutil.move( f'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,) else: os.remove(f'{directory}/modeling_flax_{lowercase_model_name}.py' ) os.remove(f'{directory}/test_modeling_flax_{lowercase_model_name}.py' ) shutil.move( f'{directory}/{lowercase_model_name}.md' ,f'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,) shutil.move( f'{directory}/tokenization_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}.py' ,) shutil.move( f'{directory}/tokenization_fast_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,) from os import fdopen, remove from shutil import copymode, move from tempfile import mkstemp def replace(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ): # Create temp file snake_case__ , snake_case__ :Optional[Any] = mkstemp() snake_case__ :Optional[Any] = False with fdopen(UpperCamelCase ,"w" ) as new_file: with open(UpperCamelCase ) as old_file: for line in old_file: new_file.write(UpperCamelCase ) if line_to_copy_below in line: snake_case__ :Optional[Any] = True for line_to_copy in lines_to_copy: new_file.write(UpperCamelCase ) if not line_found: raise ValueError(f'Line {line_to_copy_below} was not found in file.' ) # Copy the file permissions from the old file to the new file copymode(UpperCamelCase ,UpperCamelCase ) # Remove original file remove(UpperCamelCase ) # Move new file move(UpperCamelCase ,UpperCamelCase ) def skip_units(UpperCamelCase ): return ( ("generating PyTorch" in line and not output_pytorch) or ("generating TensorFlow" in line and not output_tensorflow) or ("generating Flax" in line and not output_flax) ) def replace_in_files(UpperCamelCase ): with open(UpperCamelCase ) as datafile: snake_case__ :int = [] snake_case__ :Optional[int] = False snake_case__ :List[str] = False for line in datafile: if "# To replace in: " in line and "##" not in line: snake_case__ :Optional[Any] = line.split("\"" )[1] snake_case__ :Tuple = skip_units(UpperCamelCase ) elif "# Below: " in line and "##" not in line: snake_case__ :Optional[Any] = line.split("\"" )[1] snake_case__ :List[str] = skip_units(UpperCamelCase ) elif "# End." in line and "##" not in line: if not skip_file and not skip_snippet: replace(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) snake_case__ :Tuple = [] elif "# Replace with" in line and "##" not in line: snake_case__ :Optional[Any] = [] elif "##" not in line: lines_to_copy.append(UpperCamelCase ) remove(UpperCamelCase ) replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py' ) os.rmdir(UpperCamelCase )
57
1
import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class _snake_case ( _A ): _A = '' _A = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) _A = None # compression type in fsspec. ex: "gzip" _A = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self ,UpperCamelCase = "" ,UpperCamelCase = None ,UpperCamelCase = None ,**UpperCamelCase ) -> Union[str, Any]: super().__init__(self ,**UpperCamelCase ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode snake_case__ :str = fsspec.open( UpperCamelCase ,mode="rb" ,protocol=UpperCamelCase ,compression=self.compression ,client_kwargs={ "requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459 "trust_env": True, # Enable reading proxy env variables. **(target_options or {}).pop("client_kwargs" ,{} ), # To avoid issues if it was already passed. } ,**(target_options or {}) ,) snake_case__ :Any = os.path.basename(self.file.path.split("::" )[0] ) snake_case__ :List[Any] = ( self.compressed_name[: self.compressed_name.rindex("." )] if "." in self.compressed_name else self.compressed_name ) snake_case__ :Optional[int] = None @classmethod def lowerCAmelCase_ ( cls ,UpperCamelCase ) -> Union[str, Any]: # compressed file paths are always relative to the archive root return super()._strip_protocol(UpperCamelCase ).lstrip("/" ) def lowerCAmelCase_ ( self ) -> List[Any]: if self.dir_cache is None: snake_case__ :Any = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name} snake_case__ :List[str] = {f["name"]: f} def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Optional[int]: return self.file.open().read() def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = "rb" ,UpperCamelCase=None ,UpperCamelCase=True ,UpperCamelCase=None ,**UpperCamelCase ,) -> Union[str, Any]: snake_case__ :List[str] = self._strip_protocol(UpperCamelCase ) if mode != "rb": raise ValueError(f'Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'' ) return self.file.open() class _snake_case ( _A ): _A = 'bz2' _A = 'bz2' _A = '.bz2' class _snake_case ( _A ): _A = 'gzip' _A = 'gzip' _A = '.gz' class _snake_case ( _A ): _A = 'lz4' _A = 'lz4' _A = '.lz4' class _snake_case ( _A ): _A = 'xz' _A = 'xz' _A = '.xz' class _snake_case ( _A ): _A = 'zstd' _A = 'zstd' _A = '.zst' def __init__( self ,UpperCamelCase ,UpperCamelCase = "rb" ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = DEFAULT_BLOCK_SIZE ,**UpperCamelCase ,) -> Union[str, Any]: super().__init__( fo=UpperCamelCase ,mode=UpperCamelCase ,target_protocol=UpperCamelCase ,target_options=UpperCamelCase ,block_size=UpperCamelCase ,**UpperCamelCase ,) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 snake_case__ :Optional[Any] = self.file.__enter__ class _snake_case : def __init__( self ,UpperCamelCase ) -> Optional[int]: snake_case__ :Dict = file_ def __enter__( self ) -> str: self._file.__enter__() return self def __exit__( self ,*UpperCamelCase ,**UpperCamelCase ) -> Dict: self._file.__exit__(*UpperCamelCase ,**UpperCamelCase ) def __iter__( self ) -> List[str]: return iter(self._file ) def lowerCAmelCase_ ( self ) -> Union[str, Any]: return next(self._file ) def __getattr__( self ,UpperCamelCase ) -> Union[str, Any]: return getattr(self._file ,UpperCamelCase ) def fixed_enter(*UpperCamelCase ,**UpperCamelCase ): return WrappedFile(_enter(*UpperCamelCase ,**UpperCamelCase ) ) snake_case__ :List[str] = fixed_enter
57
from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_herbert import HerbertTokenizer __UpperCAmelCase : str = logging.get_logger(__name__) __UpperCAmelCase : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} __UpperCAmelCase : List[Any] = { "vocab_file": { "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json" }, "merges_file": { "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt" }, } __UpperCAmelCase : str = {"allegro/herbert-base-cased": 5_1_4} __UpperCAmelCase : List[str] = {} class _snake_case ( _A ): _A = VOCAB_FILES_NAMES _A = PRETRAINED_VOCAB_FILES_MAP _A = PRETRAINED_INIT_CONFIGURATION _A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A = HerbertTokenizer def __init__( self ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase="<s>" ,UpperCamelCase="<unk>" ,UpperCamelCase="<pad>" ,UpperCamelCase="<mask>" ,UpperCamelCase="</s>" ,**UpperCamelCase ,) -> Dict: super().__init__( UpperCamelCase ,UpperCamelCase ,tokenizer_file=UpperCamelCase ,cls_token=UpperCamelCase ,unk_token=UpperCamelCase ,pad_token=UpperCamelCase ,mask_token=UpperCamelCase ,sep_token=UpperCamelCase ,**UpperCamelCase ,) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> List[int]: snake_case__ :Optional[int] = [self.cls_token_id] snake_case__ :Any = [self.sep_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase ,token_ids_a=UpperCamelCase ,already_has_special_tokens=UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase )) + [1] return [1] + ([0] * len(UpperCamelCase )) + [1] + ([0] * len(UpperCamelCase )) + [1] def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> List[int]: snake_case__ :Any = [self.sep_token_id] snake_case__ :Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> Tuple[str]: snake_case__ :List[str] = self._tokenizer.model.save(UpperCamelCase ,name=UpperCamelCase ) return tuple(UpperCamelCase )
57
1
__UpperCAmelCase : int = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []} __UpperCAmelCase : List[str] = ["a", "b", "c", "d", "e"] def lowercase_ ( __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : Tuple ) -> Optional[int]: '''simple docstring''' snake_case__ :List[Any] = start # add current to visited visited.append(__snake_case ) snake_case__ :List[str] = edges[current] for neighbor in neighbors: # if neighbor not in visited, visit if neighbor not in visited: snake_case__ :Any = topological_sort(__snake_case , __snake_case , __snake_case ) # if all neighbors visited add current to sort sort.append(__snake_case ) # if all vertices haven't been visited select a new one to visit if len(__snake_case ) != len(__snake_case ): for vertice in vertices: if vertice not in visited: snake_case__ :Any = topological_sort(__snake_case , __snake_case , __snake_case ) # return sort return sort if __name__ == "__main__": __UpperCAmelCase : Tuple = topological_sort("a", [], []) print(sort)
57
def lowercase_ ( __snake_case : int ) -> bool: '''simple docstring''' if p < 2: raise ValueError("p should not be less than 2!" ) elif p == 2: return True snake_case__ :List[str] = 4 snake_case__ :Optional[int] = (1 << p) - 1 for _ in range(p - 2 ): snake_case__ :List[Any] = ((s * s) - 2) % m return s == 0 if __name__ == "__main__": print(lucas_lehmer_test(7)) print(lucas_lehmer_test(1_1))
57
1
import argparse import collections import json import os import re import string import sys import numpy as np __UpperCAmelCase : Any = re.compile(R"\b(a|an|the)\b", re.UNICODE) __UpperCAmelCase : Optional[Any] = None def lowercase_ ( ) -> Any: '''simple docstring''' snake_case__ :int = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." ) parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." ) parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." ) parser.add_argument( "--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." ) parser.add_argument( "--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." ) parser.add_argument( "--na-prob-thresh" , "-t" , type=__snake_case , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , ) parser.add_argument( "--out-image-dir" , "-p" , metavar="out_images" , default=__snake_case , help="Save precision-recall curves to directory." ) parser.add_argument("--verbose" , "-v" , action="store_true" ) if len(sys.argv ) == 1: parser.print_help() sys.exit(1 ) return parser.parse_args() def lowercase_ ( __snake_case : int ) -> Union[str, Any]: '''simple docstring''' snake_case__ :Optional[int] = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: snake_case__ :Optional[Any] = bool(qa["answers"]["text"] ) return qid_to_has_ans def lowercase_ ( __snake_case : str ) -> List[Any]: '''simple docstring''' def remove_articles(__snake_case : List[Any] ): return ARTICLES_REGEX.sub(" " , __snake_case ) def white_space_fix(__snake_case : int ): return " ".join(text.split() ) def remove_punc(__snake_case : List[str] ): snake_case__ :Union[str, Any] = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(__snake_case : List[str] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(__snake_case ) ) ) ) def lowercase_ ( __snake_case : Optional[int] ) -> Optional[int]: '''simple docstring''' if not s: return [] return normalize_answer(__snake_case ).split() def lowercase_ ( __snake_case : str , __snake_case : Optional[Any] ) -> List[Any]: '''simple docstring''' return int(normalize_answer(__snake_case ) == normalize_answer(__snake_case ) ) def lowercase_ ( __snake_case : str , __snake_case : int ) -> Dict: '''simple docstring''' snake_case__ :int = get_tokens(__snake_case ) snake_case__ :int = get_tokens(__snake_case ) snake_case__ :int = collections.Counter(__snake_case ) & collections.Counter(__snake_case ) snake_case__ :Optional[int] = sum(common.values() ) if len(__snake_case ) == 0 or len(__snake_case ) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks ) if num_same == 0: return 0 snake_case__ :Union[str, Any] = 1.0 * num_same / len(__snake_case ) snake_case__ :List[Any] = 1.0 * num_same / len(__snake_case ) snake_case__ :List[Any] = (2 * precision * recall) / (precision + recall) return fa def lowercase_ ( __snake_case : Dict , __snake_case : Tuple ) -> Optional[int]: '''simple docstring''' snake_case__ :Optional[int] = {} snake_case__ :int = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: snake_case__ :str = qa["id"] snake_case__ :Optional[int] = [t for t in qa["answers"]["text"] if normalize_answer(__snake_case )] if not gold_answers: # For unanswerable questions, only correct answer is empty string snake_case__ :Optional[int] = [""] if qid not in preds: print(F'Missing prediction for {qid}' ) continue snake_case__ :Optional[Any] = preds[qid] # Take max over all gold answers snake_case__ :Dict = max(compute_exact(__snake_case , __snake_case ) for a in gold_answers ) snake_case__ :int = max(compute_fa(__snake_case , __snake_case ) for a in gold_answers ) return exact_scores, fa_scores def lowercase_ ( __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : Dict ) -> Optional[Any]: '''simple docstring''' snake_case__ :Optional[Any] = {} for qid, s in scores.items(): snake_case__ :Any = na_probs[qid] > na_prob_thresh if pred_na: snake_case__ :Optional[Any] = float(not qid_to_has_ans[qid] ) else: snake_case__ :Any = s return new_scores def lowercase_ ( __snake_case : Optional[int] , __snake_case : int , __snake_case : Union[str, Any]=None ) -> Optional[Any]: '''simple docstring''' if not qid_list: snake_case__ :str = len(__snake_case ) return collections.OrderedDict( [ ("exact", 1_0_0.0 * sum(exact_scores.values() ) / total), ("f1", 1_0_0.0 * sum(fa_scores.values() ) / total), ("total", total), ] ) else: snake_case__ :Tuple = len(__snake_case ) return collections.OrderedDict( [ ("exact", 1_0_0.0 * sum(exact_scores[k] for k in qid_list ) / total), ("f1", 1_0_0.0 * sum(fa_scores[k] for k in qid_list ) / total), ("total", total), ] ) def lowercase_ ( __snake_case : List[Any] , __snake_case : Dict , __snake_case : Dict ) -> Optional[Any]: '''simple docstring''' for k in new_eval: snake_case__ :Tuple = new_eval[k] def lowercase_ ( __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : str ) -> int: '''simple docstring''' plt.step(__snake_case , __snake_case , color="b" , alpha=0.2 , where="post" ) plt.fill_between(__snake_case , __snake_case , step="post" , alpha=0.2 , color="b" ) plt.xlabel("Recall" ) plt.ylabel("Precision" ) plt.xlim([0.0, 1.0_5] ) plt.ylim([0.0, 1.0_5] ) plt.title(__snake_case ) plt.savefig(__snake_case ) plt.clf() def lowercase_ ( __snake_case : Tuple , __snake_case : Dict , __snake_case : Optional[Any] , __snake_case : int , __snake_case : Optional[Any]=None , __snake_case : Any=None ) -> Union[str, Any]: '''simple docstring''' snake_case__ :Tuple = sorted(__snake_case , key=lambda __snake_case : na_probs[k] ) snake_case__ :Tuple = 0.0 snake_case__ :List[Any] = 1.0 snake_case__ :Optional[Any] = 0.0 snake_case__ :Optional[Any] = [1.0] snake_case__ :str = [0.0] snake_case__ :int = 0.0 for i, qid in enumerate(__snake_case ): if qid_to_has_ans[qid]: true_pos += scores[qid] snake_case__ :int = true_pos / float(i + 1 ) snake_case__ :int = true_pos / float(__snake_case ) if i == len(__snake_case ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(__snake_case ) recalls.append(__snake_case ) if out_image: plot_pr_curve(__snake_case , __snake_case , __snake_case , __snake_case ) return {"ap": 1_0_0.0 * avg_prec} def lowercase_ ( __snake_case : Union[str, Any] , __snake_case : int , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : Tuple ) -> Optional[Any]: '''simple docstring''' if out_image_dir and not os.path.exists(__snake_case ): os.makedirs(__snake_case ) snake_case__ :List[Any] = sum(1 for v in qid_to_has_ans.values() if v ) if num_true_pos == 0: return snake_case__ :Dict = make_precision_recall_eval( __snake_case , __snake_case , __snake_case , __snake_case , out_image=os.path.join(__snake_case , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , ) snake_case__ :int = make_precision_recall_eval( __snake_case , __snake_case , __snake_case , __snake_case , out_image=os.path.join(__snake_case , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , ) snake_case__ :Tuple = {k: float(__snake_case ) for k, v in qid_to_has_ans.items()} snake_case__ :Optional[int] = make_precision_recall_eval( __snake_case , __snake_case , __snake_case , __snake_case , out_image=os.path.join(__snake_case , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , ) merge_eval(__snake_case , __snake_case , "pr_exact" ) merge_eval(__snake_case , __snake_case , "pr_f1" ) merge_eval(__snake_case , __snake_case , "pr_oracle" ) def lowercase_ ( __snake_case : Any , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Any ) -> Optional[int]: '''simple docstring''' if not qid_list: return snake_case__ :Optional[Any] = [na_probs[k] for k in qid_list] snake_case__ :Optional[Any] = np.ones_like(__snake_case ) / float(len(__snake_case ) ) plt.hist(__snake_case , weights=__snake_case , bins=20 , range=(0.0, 1.0) ) plt.xlabel("Model probability of no-answer" ) plt.ylabel("Proportion of dataset" ) plt.title(F'Histogram of no-answer probability: {name}' ) plt.savefig(os.path.join(__snake_case , F'na_prob_hist_{name}.png' ) ) plt.clf() def lowercase_ ( __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : List[Any] , __snake_case : List[Any] ) -> List[str]: '''simple docstring''' snake_case__ :int = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] ) snake_case__ :str = num_no_ans snake_case__ :Any = cur_score snake_case__ :int = 0.0 snake_case__ :Any = sorted(__snake_case , key=lambda __snake_case : na_probs[k] ) for i, qid in enumerate(__snake_case ): if qid not in scores: continue if qid_to_has_ans[qid]: snake_case__ :Any = scores[qid] else: if preds[qid]: snake_case__ :Optional[int] = -1 else: snake_case__ :List[str] = 0 cur_score += diff if cur_score > best_score: snake_case__ :List[str] = cur_score snake_case__ :List[Any] = na_probs[qid] return 1_0_0.0 * best_score / len(__snake_case ), best_thresh def lowercase_ ( __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : List[str] ) -> Optional[int]: '''simple docstring''' snake_case__ , snake_case__ :Tuple = find_best_thresh(__snake_case , __snake_case , __snake_case , __snake_case ) snake_case__ , snake_case__ :Dict = find_best_thresh(__snake_case , __snake_case , __snake_case , __snake_case ) snake_case__ :Any = best_exact snake_case__ :Tuple = exact_thresh snake_case__ :Any = best_fa snake_case__ :str = fa_thresh def lowercase_ ( ) -> Tuple: '''simple docstring''' with open(OPTS.data_file ) as f: snake_case__ :Any = json.load(__snake_case ) snake_case__ :Any = dataset_json["data"] with open(OPTS.pred_file ) as f: snake_case__ :List[str] = json.load(__snake_case ) if OPTS.na_prob_file: with open(OPTS.na_prob_file ) as f: snake_case__ :str = json.load(__snake_case ) else: snake_case__ :Optional[Any] = {k: 0.0 for k in preds} snake_case__ :List[str] = make_qid_to_has_ans(__snake_case ) # maps qid to True/False snake_case__ :Tuple = [k for k, v in qid_to_has_ans.items() if v] snake_case__ :List[str] = [k for k, v in qid_to_has_ans.items() if not v] snake_case__ , snake_case__ :Any = get_raw_scores(__snake_case , __snake_case ) snake_case__ :List[str] = apply_no_ans_threshold(__snake_case , __snake_case , __snake_case , OPTS.na_prob_thresh ) snake_case__ :List[str] = apply_no_ans_threshold(__snake_case , __snake_case , __snake_case , OPTS.na_prob_thresh ) snake_case__ :List[Any] = make_eval_dict(__snake_case , __snake_case ) if has_ans_qids: snake_case__ :Optional[Any] = make_eval_dict(__snake_case , __snake_case , qid_list=__snake_case ) merge_eval(__snake_case , __snake_case , "HasAns" ) if no_ans_qids: snake_case__ :Any = make_eval_dict(__snake_case , __snake_case , qid_list=__snake_case ) merge_eval(__snake_case , __snake_case , "NoAns" ) if OPTS.na_prob_file: find_all_best_thresh(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , OPTS.out_image_dir ) histogram_na_prob(__snake_case , __snake_case , OPTS.out_image_dir , "hasAns" ) histogram_na_prob(__snake_case , __snake_case , OPTS.out_image_dir , "noAns" ) if OPTS.out_file: with open(OPTS.out_file , "w" ) as f: json.dump(__snake_case , __snake_case ) else: print(json.dumps(__snake_case , indent=2 ) ) if __name__ == "__main__": __UpperCAmelCase : List[Any] = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use("Agg") import matplotlib.pyplot as plt main()
57
from typing import Any def lowercase_ ( __snake_case : list , __snake_case : list , __snake_case : dict , __snake_case : dict , __snake_case : dict , ) -> list: '''simple docstring''' _validation( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) # Creates data structures and fill initial step snake_case__ :dict = {} snake_case__ :dict = {} for state in states_space: snake_case__ :List[Any] = observations_space[0] snake_case__ :str = ( initial_probabilities[state] * emission_probabilities[state][observation] ) snake_case__ :str = None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 , len(__snake_case ) ): snake_case__ :Any = observations_space[o] snake_case__ :Tuple = observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function snake_case__ :Tuple = "" snake_case__ :Union[str, Any] = -1 for k_state in states_space: snake_case__ :int = ( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: snake_case__ :str = probability snake_case__ :Tuple = k_state # Update probabilities and pointers dicts snake_case__ :List[str] = ( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) snake_case__ :List[str] = arg_max # The final observation snake_case__ :str = observations_space[len(__snake_case ) - 1] # argmax for given final observation snake_case__ :Optional[int] = "" snake_case__ :List[str] = -1 for k_state in states_space: snake_case__ :List[str] = probabilities[(k_state, final_observation)] if probability > max_probability: snake_case__ :List[str] = probability snake_case__ :int = k_state snake_case__ :Any = arg_max # Process pointers backwards snake_case__ :int = last_state snake_case__ :List[str] = [] for o in range(len(__snake_case ) - 1 , -1 , -1 ): result.append(__snake_case ) snake_case__ :List[str] = pointers[previous, observations_space[o]] result.reverse() return result def lowercase_ ( __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , ) -> None: '''simple docstring''' _validate_not_empty( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) _validate_lists(__snake_case , __snake_case ) _validate_dicts( __snake_case , __snake_case , __snake_case ) def lowercase_ ( __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , ) -> None: '''simple docstring''' if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError("There's an empty parameter" ) def lowercase_ ( __snake_case : Any , __snake_case : Any ) -> None: '''simple docstring''' _validate_list(__snake_case , "observations_space" ) _validate_list(__snake_case , "states_space" ) def lowercase_ ( __snake_case : Any , __snake_case : str ) -> None: '''simple docstring''' if not isinstance(_object , __snake_case ): snake_case__ :Optional[int] = F'{var_name} must be a list' raise ValueError(__snake_case ) else: for x in _object: if not isinstance(__snake_case , __snake_case ): snake_case__ :Any = F'{var_name} must be a list of strings' raise ValueError(__snake_case ) def lowercase_ ( __snake_case : Any , __snake_case : Any , __snake_case : Any , ) -> None: '''simple docstring''' _validate_dict(__snake_case , "initial_probabilities" , __snake_case ) _validate_nested_dict(__snake_case , "transition_probabilities" ) _validate_nested_dict(__snake_case , "emission_probabilities" ) def lowercase_ ( __snake_case : Any , __snake_case : str ) -> None: '''simple docstring''' _validate_dict(_object , __snake_case , __snake_case ) for x in _object.values(): _validate_dict(__snake_case , __snake_case , __snake_case , __snake_case ) def lowercase_ ( __snake_case : Any , __snake_case : str , __snake_case : type , __snake_case : bool = False ) -> None: '''simple docstring''' if not isinstance(_object , __snake_case ): snake_case__ :str = F'{var_name} must be a dict' raise ValueError(__snake_case ) if not all(isinstance(__snake_case , __snake_case ) for x in _object ): snake_case__ :List[Any] = F'{var_name} all keys must be strings' raise ValueError(__snake_case ) if not all(isinstance(__snake_case , __snake_case ) for x in _object.values() ): snake_case__ :Optional[int] = "nested dictionary " if nested else "" snake_case__ :int = F'{var_name} {nested_text}all values must be {value_type.__name__}' raise ValueError(__snake_case ) if __name__ == "__main__": from doctest import testmod testmod()
57
1
def lowercase_ ( __snake_case : int ) -> bool: '''simple docstring''' if p < 2: raise ValueError("p should not be less than 2!" ) elif p == 2: return True snake_case__ :List[str] = 4 snake_case__ :Optional[int] = (1 << p) - 1 for _ in range(p - 2 ): snake_case__ :List[Any] = ((s * s) - 2) % m return s == 0 if __name__ == "__main__": print(lucas_lehmer_test(7)) print(lucas_lehmer_test(1_1))
57
def lowercase_ ( __snake_case : str ) -> list: '''simple docstring''' return [ txt[:a] + txt[a].upper() + txt[a + 1 :] for a in range(len(__snake_case ) ) if txt[a].isalpha() ] if __name__ == "__main__": __import__("doctest").testmod()
57
1
import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput __UpperCAmelCase : Union[str, Any] = "scheduler_config.json" class _snake_case ( _A ): _A = 1 _A = 2 _A = 3 _A = 4 _A = 5 _A = 6 _A = 7 _A = 8 _A = 9 _A = 10 _A = 11 _A = 12 _A = 13 _A = 14 @dataclass class _snake_case ( _A ): _A = 42 class _snake_case : _A = SCHEDULER_CONFIG_NAME _A = [] _A = True @classmethod def lowerCAmelCase_ ( cls ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase=False ,**UpperCamelCase ,) -> Tuple: snake_case__ , snake_case__ , snake_case__ :str = cls.load_config( pretrained_model_name_or_path=UpperCamelCase ,subfolder=UpperCamelCase ,return_unused_kwargs=UpperCamelCase ,return_commit_hash=UpperCamelCase ,**UpperCamelCase ,) return cls.from_config(UpperCamelCase ,return_unused_kwargs=UpperCamelCase ,**UpperCamelCase ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = False ,**UpperCamelCase ) -> Union[str, Any]: self.save_config(save_directory=UpperCamelCase ,push_to_hub=UpperCamelCase ,**UpperCamelCase ) @property def lowerCAmelCase_ ( self ) -> Optional[Any]: return self._get_compatibles() @classmethod def lowerCAmelCase_ ( cls ) -> str: snake_case__ :Dict = list(set([cls.__name__] + cls._compatibles ) ) snake_case__ :Optional[int] = importlib.import_module(__name__.split("." )[0] ) snake_case__ :int = [ getattr(UpperCamelCase ,UpperCamelCase ) for c in compatible_classes_str if hasattr(UpperCamelCase ,UpperCamelCase ) ] return compatible_classes
57
def lowercase_ ( __snake_case : int = 10_00 ) -> int: '''simple docstring''' snake_case__ :int = 3 snake_case__ :int = 0 while a < n: if a % 3 == 0 or a % 5 == 0: result += a elif a % 15 == 0: result -= a a += 1 return result if __name__ == "__main__": print(F'''{solution() = }''')
57
1
def lowercase_ ( __snake_case : str ) -> list: '''simple docstring''' if n_term == "": return [] snake_case__ :list = [] for temp in range(int(__snake_case ) ): series.append(F'1/{temp + 1}' if series else "1" ) return series if __name__ == "__main__": __UpperCAmelCase : int = input("Enter the last number (nth term) of the Harmonic Series") print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n") print(harmonic_series(nth_term))
57
import os import sys import unittest __UpperCAmelCase : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __UpperCAmelCase : Tuple = os.path.join(git_repo_path, "src", "diffusers") class _snake_case ( unittest.TestCase ): def lowerCAmelCase_ ( self ) -> Union[str, Any]: snake_case__ :Tuple = find_backend(" if not is_torch_available():" ) self.assertEqual(UpperCamelCase ,"torch" ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") snake_case__ :Tuple = find_backend(" if not (is_torch_available() and is_transformers_available()):" ) self.assertEqual(UpperCamelCase ,"torch_and_transformers" ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") snake_case__ :str = find_backend( " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" ) self.assertEqual(UpperCamelCase ,"torch_and_transformers_and_onnx" ) def lowerCAmelCase_ ( self ) -> str: snake_case__ :int = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch" ,UpperCamelCase ) self.assertIn("torch_and_transformers" ,UpperCamelCase ) self.assertIn("flax_and_transformers" ,UpperCamelCase ) self.assertIn("torch_and_transformers_and_onnx" ,UpperCamelCase ) # Likewise, we can't assert on the exact content of a key self.assertIn("UNet2DModel" ,objects["torch"] ) self.assertIn("FlaxUNet2DConditionModel" ,objects["flax"] ) self.assertIn("StableDiffusionPipeline" ,objects["torch_and_transformers"] ) self.assertIn("FlaxStableDiffusionPipeline" ,objects["flax_and_transformers"] ) self.assertIn("LMSDiscreteScheduler" ,objects["torch_and_scipy"] ) self.assertIn("OnnxStableDiffusionPipeline" ,objects["torch_and_transformers_and_onnx"] ) def lowerCAmelCase_ ( self ) -> Any: snake_case__ :Union[str, Any] = create_dummy_object("CONSTANT" ,"'torch'" ) self.assertEqual(UpperCamelCase ,"\nCONSTANT = None\n" ) snake_case__ :Optional[Any] = create_dummy_object("function" ,"'torch'" ) self.assertEqual( UpperCamelCase ,"\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" ) snake_case__ :str = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n" snake_case__ :List[str] = create_dummy_object("FakeClass" ,"'torch'" ) self.assertEqual(UpperCamelCase ,UpperCamelCase ) def lowerCAmelCase_ ( self ) -> List[Any]: snake_case__ :Tuple = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n" snake_case__ :int = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} ) self.assertEqual(dummy_files["torch"] ,UpperCamelCase )
57
1
import os import sys import unittest __UpperCAmelCase : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __UpperCAmelCase : Tuple = os.path.join(git_repo_path, "src", "diffusers") class _snake_case ( unittest.TestCase ): def lowerCAmelCase_ ( self ) -> Union[str, Any]: snake_case__ :Tuple = find_backend(" if not is_torch_available():" ) self.assertEqual(UpperCamelCase ,"torch" ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") snake_case__ :Tuple = find_backend(" if not (is_torch_available() and is_transformers_available()):" ) self.assertEqual(UpperCamelCase ,"torch_and_transformers" ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") snake_case__ :str = find_backend( " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" ) self.assertEqual(UpperCamelCase ,"torch_and_transformers_and_onnx" ) def lowerCAmelCase_ ( self ) -> str: snake_case__ :int = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch" ,UpperCamelCase ) self.assertIn("torch_and_transformers" ,UpperCamelCase ) self.assertIn("flax_and_transformers" ,UpperCamelCase ) self.assertIn("torch_and_transformers_and_onnx" ,UpperCamelCase ) # Likewise, we can't assert on the exact content of a key self.assertIn("UNet2DModel" ,objects["torch"] ) self.assertIn("FlaxUNet2DConditionModel" ,objects["flax"] ) self.assertIn("StableDiffusionPipeline" ,objects["torch_and_transformers"] ) self.assertIn("FlaxStableDiffusionPipeline" ,objects["flax_and_transformers"] ) self.assertIn("LMSDiscreteScheduler" ,objects["torch_and_scipy"] ) self.assertIn("OnnxStableDiffusionPipeline" ,objects["torch_and_transformers_and_onnx"] ) def lowerCAmelCase_ ( self ) -> Any: snake_case__ :Union[str, Any] = create_dummy_object("CONSTANT" ,"'torch'" ) self.assertEqual(UpperCamelCase ,"\nCONSTANT = None\n" ) snake_case__ :Optional[Any] = create_dummy_object("function" ,"'torch'" ) self.assertEqual( UpperCamelCase ,"\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" ) snake_case__ :str = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n" snake_case__ :List[str] = create_dummy_object("FakeClass" ,"'torch'" ) self.assertEqual(UpperCamelCase ,UpperCamelCase ) def lowerCAmelCase_ ( self ) -> List[Any]: snake_case__ :Tuple = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n" snake_case__ :int = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} ) self.assertEqual(dummy_files["torch"] ,UpperCamelCase )
57
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available __UpperCAmelCase : Tuple = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : List[Any] = ["BartphoTokenizer"] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys __UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
57
1
import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class _snake_case ( _A ): _A = (DDPMScheduler,) def lowerCAmelCase_ ( self ,**UpperCamelCase ) -> List[Any]: snake_case__ :Any = { "num_train_timesteps": 1_000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "variance_type": "fixed_small", "clip_sample": True, } config.update(**UpperCamelCase ) return config def lowerCAmelCase_ ( self ) -> Dict: for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=UpperCamelCase ) def lowerCAmelCase_ ( self ) -> Union[str, Any]: for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] ,[0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=UpperCamelCase ,beta_end=UpperCamelCase ) def lowerCAmelCase_ ( self ) -> Optional[Any]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=UpperCamelCase ) def lowerCAmelCase_ ( self ) -> List[Any]: for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=UpperCamelCase ) def lowerCAmelCase_ ( self ) -> Dict: for clip_sample in [True, False]: self.check_over_configs(clip_sample=UpperCamelCase ) def lowerCAmelCase_ ( self ) -> List[Any]: self.check_over_configs(thresholding=UpperCamelCase ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=UpperCamelCase ,prediction_type=UpperCamelCase ,sample_max_value=UpperCamelCase ,) def lowerCAmelCase_ ( self ) -> Optional[Any]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=UpperCamelCase ) def lowerCAmelCase_ ( self ) -> List[str]: for t in [0, 500, 999]: self.check_over_forward(time_step=UpperCamelCase ) def lowerCAmelCase_ ( self ) -> List[str]: snake_case__ :Optional[int] = self.scheduler_classes[0] snake_case__ :Tuple = self.get_scheduler_config() snake_case__ :Union[str, Any] = scheduler_class(**UpperCamelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def lowerCAmelCase_ ( self ) -> List[Any]: snake_case__ :str = self.scheduler_classes[0] snake_case__ :Optional[Any] = self.get_scheduler_config() snake_case__ :str = scheduler_class(**UpperCamelCase ) snake_case__ :Any = len(UpperCamelCase ) snake_case__ :Union[str, Any] = self.dummy_model() snake_case__ :Optional[Any] = self.dummy_sample_deter snake_case__ :int = torch.manual_seed(0 ) for t in reversed(range(UpperCamelCase ) ): # 1. predict noise residual snake_case__ :Optional[int] = model(UpperCamelCase ,UpperCamelCase ) # 2. predict previous mean of sample x_t-1 snake_case__ :Union[str, Any] = scheduler.step(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,generator=UpperCamelCase ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance snake_case__ :Union[str, Any] = pred_prev_sample snake_case__ :int = torch.sum(torch.abs(UpperCamelCase ) ) snake_case__ :List[Any] = torch.mean(torch.abs(UpperCamelCase ) ) assert abs(result_sum.item() - 258.9606 ) < 1E-2 assert abs(result_mean.item() - 0.3372 ) < 1E-3 def lowerCAmelCase_ ( self ) -> str: snake_case__ :List[Any] = self.scheduler_classes[0] snake_case__ :Tuple = self.get_scheduler_config(prediction_type="v_prediction" ) snake_case__ :int = scheduler_class(**UpperCamelCase ) snake_case__ :List[Any] = len(UpperCamelCase ) snake_case__ :int = self.dummy_model() snake_case__ :Any = self.dummy_sample_deter snake_case__ :List[str] = torch.manual_seed(0 ) for t in reversed(range(UpperCamelCase ) ): # 1. predict noise residual snake_case__ :Optional[Any] = model(UpperCamelCase ,UpperCamelCase ) # 2. predict previous mean of sample x_t-1 snake_case__ :str = scheduler.step(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,generator=UpperCamelCase ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance snake_case__ :List[Any] = pred_prev_sample snake_case__ :str = torch.sum(torch.abs(UpperCamelCase ) ) snake_case__ :int = torch.mean(torch.abs(UpperCamelCase ) ) assert abs(result_sum.item() - 202.0296 ) < 1E-2 assert abs(result_mean.item() - 0.2631 ) < 1E-3 def lowerCAmelCase_ ( self ) -> Any: snake_case__ :str = self.scheduler_classes[0] snake_case__ :Dict = self.get_scheduler_config() snake_case__ :Any = scheduler_class(**UpperCamelCase ) snake_case__ :Tuple = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=UpperCamelCase ) snake_case__ :List[Any] = scheduler.timesteps for i, timestep in enumerate(UpperCamelCase ): if i == len(UpperCamelCase ) - 1: snake_case__ :Any = -1 else: snake_case__ :Optional[Any] = timesteps[i + 1] snake_case__ :str = scheduler.previous_timestep(UpperCamelCase ) snake_case__ :Any = prev_t.item() self.assertEqual(UpperCamelCase ,UpperCamelCase ) def lowerCAmelCase_ ( self ) -> Optional[int]: snake_case__ :Dict = self.scheduler_classes[0] snake_case__ :List[str] = self.get_scheduler_config() snake_case__ :int = scheduler_class(**UpperCamelCase ) snake_case__ :Optional[int] = [100, 87, 50, 51, 0] with self.assertRaises(UpperCamelCase ,msg="`custom_timesteps` must be in descending order." ): scheduler.set_timesteps(timesteps=UpperCamelCase ) def lowerCAmelCase_ ( self ) -> int: snake_case__ :Optional[Any] = self.scheduler_classes[0] snake_case__ :str = self.get_scheduler_config() snake_case__ :List[Any] = scheduler_class(**UpperCamelCase ) snake_case__ :Tuple = [100, 87, 50, 1, 0] snake_case__ :Optional[Any] = len(UpperCamelCase ) with self.assertRaises(UpperCamelCase ,msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ): scheduler.set_timesteps(num_inference_steps=UpperCamelCase ,timesteps=UpperCamelCase ) def lowerCAmelCase_ ( self ) -> List[Any]: snake_case__ :Union[str, Any] = self.scheduler_classes[0] snake_case__ :Any = self.get_scheduler_config() snake_case__ :List[str] = scheduler_class(**UpperCamelCase ) snake_case__ :List[str] = [scheduler.config.num_train_timesteps] with self.assertRaises( UpperCamelCase ,msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" ,): scheduler.set_timesteps(timesteps=UpperCamelCase )
57
import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class _snake_case ( unittest.TestCase ): def lowerCAmelCase_ ( self ) -> List[Any]: # A mock response for an HTTP head request to emulate server down snake_case__ :Tuple = mock.Mock() snake_case__ :List[str] = 500 snake_case__ :Any = {} snake_case__ :Union[str, Any] = HTTPError snake_case__ :Tuple = {} # Download this model to make sure it's in the cache. snake_case__ :Any = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.Session.request" ,return_value=UpperCamelCase ) as mock_head: snake_case__ :Dict = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def lowerCAmelCase_ ( self ) -> Dict: # A mock response for an HTTP head request to emulate server down snake_case__ :Union[str, Any] = mock.Mock() snake_case__ :int = 500 snake_case__ :Any = {} snake_case__ :Dict = HTTPError snake_case__ :List[Any] = {} # Download this model to make sure it's in the cache. snake_case__ :Optional[int] = GPTaTokenizerFast.from_pretrained("gpt2" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.Session.request" ,return_value=UpperCamelCase ) as mock_head: snake_case__ :Any = GPTaTokenizerFast.from_pretrained("gpt2" ) # This check we did call the fake head request mock_head.assert_called() def lowerCAmelCase_ ( self ) -> int: # This test is for deprecated behavior and can be removed in v5 try: snake_case__ :Union[str, Any] = tempfile.mktemp() with open(UpperCamelCase ,"wb" ) as f: http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" ,UpperCamelCase ) snake_case__ :Tuple = AlbertTokenizer.from_pretrained(UpperCamelCase ) finally: os.remove(UpperCamelCase ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile("tokenizer.json" ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open("tokenizer.json" ,"wb" ) as f: http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" ,UpperCamelCase ) snake_case__ :Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size ,1_000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. finally: os.remove("tokenizer.json" ) def lowerCAmelCase_ ( self ) -> Union[str, Any]: # This test is for deprecated behavior and can be removed in v5 snake_case__ :Union[str, Any] = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" ) @is_staging_test class _snake_case ( unittest.TestCase ): _A = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou'] @classmethod def lowerCAmelCase_ ( cls ) -> Optional[int]: snake_case__ :List[str] = TOKEN HfFolder.save_token(UpperCamelCase ) @classmethod def lowerCAmelCase_ ( cls ) -> Union[str, Any]: try: delete_repo(token=cls._token ,repo_id="test-tokenizer" ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id="valid_org/test-tokenizer-org" ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id="test-dynamic-tokenizer" ) except HTTPError: pass def lowerCAmelCase_ ( self ) -> Optional[Any]: with tempfile.TemporaryDirectory() as tmp_dir: snake_case__ :List[str] = os.path.join(UpperCamelCase ,"vocab.txt" ) with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) snake_case__ :str = BertTokenizer(UpperCamelCase ) tokenizer.push_to_hub("test-tokenizer" ,use_auth_token=self._token ) snake_case__ :Dict = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) # Reset repo delete_repo(token=self._token ,repo_id="test-tokenizer" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase ,repo_id="test-tokenizer" ,push_to_hub=UpperCamelCase ,use_auth_token=self._token ) snake_case__ :List[str] = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) def lowerCAmelCase_ ( self ) -> Optional[int]: with tempfile.TemporaryDirectory() as tmp_dir: snake_case__ :List[Any] = os.path.join(UpperCamelCase ,"vocab.txt" ) with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) snake_case__ :Any = BertTokenizer(UpperCamelCase ) tokenizer.push_to_hub("valid_org/test-tokenizer-org" ,use_auth_token=self._token ) snake_case__ :Any = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) # Reset repo delete_repo(token=self._token ,repo_id="valid_org/test-tokenizer-org" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( UpperCamelCase ,repo_id="valid_org/test-tokenizer-org" ,push_to_hub=UpperCamelCase ,use_auth_token=self._token ) snake_case__ :Union[str, Any] = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) @require_tokenizers def lowerCAmelCase_ ( self ) -> Any: CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: snake_case__ :str = os.path.join(UpperCamelCase ,"vocab.txt" ) with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) snake_case__ :Optional[int] = CustomTokenizer(UpperCamelCase ) # No fast custom tokenizer tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token ) snake_case__ :Union[str, Any] = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=UpperCamelCase ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: snake_case__ :int = os.path.join(UpperCamelCase ,"vocab.txt" ) with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) snake_case__ :Tuple = BertTokenizerFast.from_pretrained(UpperCamelCase ) bert_tokenizer.save_pretrained(UpperCamelCase ) snake_case__ :List[Any] = CustomTokenizerFast.from_pretrained(UpperCamelCase ) tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token ) snake_case__ :List[Any] = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=UpperCamelCase ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizerFast" ) snake_case__ :List[str] = AutoTokenizer.from_pretrained( f'{USER}/test-dynamic-tokenizer' ,use_fast=UpperCamelCase ,trust_remote_code=UpperCamelCase ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" ) class _snake_case ( unittest.TestCase ): def lowerCAmelCase_ ( self ) -> List[Any]: snake_case__ :int = Trie() trie.add("Hello 友達" ) self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} ) trie.add("Hello" ) trie.data self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} ) def lowerCAmelCase_ ( self ) -> int: snake_case__ :List[str] = Trie() self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS] This is a extra_id_100"] ) trie.add("[CLS]" ) trie.add("extra_id_1" ) trie.add("extra_id_100" ) self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS]", " This is a ", "extra_id_100"] ) def lowerCAmelCase_ ( self ) -> str: snake_case__ :Optional[Any] = Trie() trie.add("A" ) self.assertEqual(trie.split("ABC" ) ,["A", "BC"] ) self.assertEqual(trie.split("BCA" ) ,["BC", "A"] ) def lowerCAmelCase_ ( self ) -> Dict: snake_case__ :Any = Trie() trie.add("TOKEN]" ) trie.add("[SPECIAL_TOKEN]" ) self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] ) def lowerCAmelCase_ ( self ) -> Tuple: snake_case__ :List[Any] = Trie() trie.add("A" ) trie.add("P" ) trie.add("[SPECIAL_TOKEN]" ) self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] ) def lowerCAmelCase_ ( self ) -> Tuple: snake_case__ :str = Trie() trie.add("AB" ) trie.add("B" ) trie.add("C" ) self.assertEqual(trie.split("ABC" ) ,["AB", "C"] ) def lowerCAmelCase_ ( self ) -> Union[str, Any]: snake_case__ :Dict = Trie() trie.add("ABC" ) trie.add("B" ) trie.add("CD" ) self.assertEqual(trie.split("ABCD" ) ,["ABC", "D"] ) def lowerCAmelCase_ ( self ) -> int: # Even if the offsets are wrong, we necessarily output correct string # parts. snake_case__ :Optional[int] = Trie() snake_case__ :Union[str, Any] = trie.cut_text("ABC" ,[0, 0, 2, 1, 2, 3] ) self.assertEqual(UpperCamelCase ,["AB", "C"] )
57
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) __UpperCAmelCase : Any = { "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json", "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json", "kssteven/ibert-roberta-large-mnli": ( "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json" ), } class _snake_case ( _A ): _A = 'ibert' def __init__( self ,UpperCamelCase=30_522 ,UpperCamelCase=768 ,UpperCamelCase=12 ,UpperCamelCase=12 ,UpperCamelCase=3_072 ,UpperCamelCase="gelu" ,UpperCamelCase=0.1 ,UpperCamelCase=0.1 ,UpperCamelCase=512 ,UpperCamelCase=2 ,UpperCamelCase=0.02 ,UpperCamelCase=1E-12 ,UpperCamelCase=1 ,UpperCamelCase=0 ,UpperCamelCase=2 ,UpperCamelCase="absolute" ,UpperCamelCase=False ,UpperCamelCase="none" ,**UpperCamelCase ,) -> Any: super().__init__(pad_token_id=UpperCamelCase ,bos_token_id=UpperCamelCase ,eos_token_id=UpperCamelCase ,**UpperCamelCase ) snake_case__ :Dict = vocab_size snake_case__ :List[str] = hidden_size snake_case__ :Optional[Any] = num_hidden_layers snake_case__ :List[Any] = num_attention_heads snake_case__ :str = hidden_act snake_case__ :Any = intermediate_size snake_case__ :List[str] = hidden_dropout_prob snake_case__ :List[Any] = attention_probs_dropout_prob snake_case__ :Dict = max_position_embeddings snake_case__ :Optional[Any] = type_vocab_size snake_case__ :List[str] = initializer_range snake_case__ :Any = layer_norm_eps snake_case__ :Any = position_embedding_type snake_case__ :List[Any] = quant_mode snake_case__ :List[Any] = force_dequant class _snake_case ( _A ): @property def lowerCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": snake_case__ :Tuple = {0: "batch", 1: "choice", 2: "sequence"} else: snake_case__ :Any = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
57
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __UpperCAmelCase : Optional[Any] = 1_6 __UpperCAmelCase : Optional[int] = 3_2 def lowercase_ ( __snake_case : Accelerator , __snake_case : int = 16 , __snake_case : str = "bert-base-cased" ) -> Optional[Any]: '''simple docstring''' snake_case__ :int = AutoTokenizer.from_pretrained(__snake_case ) snake_case__ :Optional[int] = load_dataset("glue" , "mrpc" ) def tokenize_function(__snake_case : Tuple ): # max_length=None => use the model max length (it's actually the default) snake_case__ :Any = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__snake_case , max_length=__snake_case ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset snake_case__ :List[Any] = datasets.map( __snake_case , batched=__snake_case , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=__snake_case ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library snake_case__ :Any = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(__snake_case : Dict ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__snake_case , padding="max_length" , max_length=1_28 , return_tensors="pt" ) return tokenizer.pad(__snake_case , padding="longest" , return_tensors="pt" ) # Instantiate dataloaders. snake_case__ :Any = DataLoader( tokenized_datasets["train"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case ) snake_case__ :Tuple = DataLoader( tokenized_datasets["validation"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case ) return train_dataloader, eval_dataloader def lowercase_ ( __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : Optional[int] ) -> Tuple: '''simple docstring''' model.eval() snake_case__ :Union[str, Any] = 0 for step, batch in enumerate(__snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): snake_case__ :List[Any] = model(**__snake_case ) snake_case__ :Any = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times snake_case__ , snake_case__ :Tuple = accelerator.gather( (predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(__snake_case ) - 1: snake_case__ :List[str] = predictions[: len(eval_dataloader.dataset ) - samples_seen] snake_case__ :Optional[int] = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=__snake_case , references=__snake_case , ) snake_case__ :int = metric.compute() return eval_metric["accuracy"] def lowercase_ ( __snake_case : Union[str, Any] , __snake_case : Optional[Any] ) -> Any: '''simple docstring''' snake_case__ :Any = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs snake_case__ :Union[str, Any] = config["lr"] snake_case__ :List[str] = int(config["num_epochs"] ) snake_case__ :Optional[Any] = int(config["seed"] ) snake_case__ :List[Any] = int(config["batch_size"] ) snake_case__ :List[Any] = args.model_name_or_path set_seed(__snake_case ) snake_case__ , snake_case__ :List[Any] = get_dataloaders(__snake_case , __snake_case , __snake_case ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) snake_case__ :List[Any] = AutoModelForSequenceClassification.from_pretrained(__snake_case , return_dict=__snake_case ) # Instantiate optimizer snake_case__ :int = ( AdamW if accelerator.state.deepspeed_plugin is None or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) snake_case__ :Tuple = optimizer_cls(params=model.parameters() , lr=__snake_case ) if accelerator.state.deepspeed_plugin is not None: snake_case__ :List[str] = accelerator.state.deepspeed_plugin.deepspeed_config[ "gradient_accumulation_steps" ] else: snake_case__ :Any = 1 snake_case__ :List[Any] = (len(__snake_case ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): snake_case__ :Optional[Any] = get_linear_schedule_with_warmup( optimizer=__snake_case , num_warmup_steps=0 , num_training_steps=__snake_case , ) else: snake_case__ :Any = DummyScheduler(__snake_case , total_num_steps=__snake_case , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ :int = accelerator.prepare( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) # We need to keep track of how many total steps we have iterated over snake_case__ :Dict = 0 # We also need to keep track of the stating epoch so files are named properly snake_case__ :Union[str, Any] = 0 snake_case__ :List[str] = evaluate.load("glue" , "mrpc" ) snake_case__ :Optional[Any] = num_epochs if args.partial_train_epoch is not None: snake_case__ :List[Any] = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) snake_case__ :Union[str, Any] = args.resume_from_checkpoint.split("epoch_" )[1] snake_case__ :Dict = "" for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break snake_case__ :str = int(__snake_case ) + 1 snake_case__ :List[Any] = evaluation_loop(__snake_case , __snake_case , __snake_case , __snake_case ) accelerator.print("resumed checkpoint performance:" , __snake_case ) accelerator.print("resumed checkpoint's scheduler's lr:" , lr_scheduler.get_lr()[0] ) accelerator.print("resumed optimizers's lr:" , optimizer.param_groups[0]["lr"] ) with open(os.path.join(args.output_dir , F'state_{starting_epoch-1}.json' ) , "r" ) as f: snake_case__ :Tuple = json.load(__snake_case ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model snake_case__ :Optional[int] = {} for epoch in range(__snake_case , __snake_case ): model.train() for step, batch in enumerate(__snake_case ): snake_case__ :str = model(**__snake_case ) snake_case__ :List[str] = outputs.loss snake_case__ :List[Any] = loss / gradient_accumulation_steps accelerator.backward(__snake_case ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 snake_case__ :int = F'epoch_{epoch}' snake_case__ :str = os.path.join(args.output_dir , __snake_case ) accelerator.save_state(__snake_case ) snake_case__ :Union[str, Any] = evaluation_loop(__snake_case , __snake_case , __snake_case , __snake_case ) snake_case__ :List[str] = accuracy snake_case__ :List[str] = lr_scheduler.get_lr()[0] snake_case__ :List[Any] = optimizer.param_groups[0]["lr"] snake_case__ :Dict = epoch snake_case__ :List[Any] = overall_step accelerator.print(F'epoch {epoch}:' , __snake_case ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , F'state_{epoch}.json' ) , "w" ) as f: json.dump(__snake_case , __snake_case ) def lowercase_ ( ) -> Any: '''simple docstring''' snake_case__ :List[Any] = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." ) parser.add_argument( "--model_name_or_path" , type=__snake_case , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__snake_case , ) parser.add_argument( "--output_dir" , type=__snake_case , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , ) parser.add_argument( "--resume_from_checkpoint" , type=__snake_case , default=__snake_case , help="If the training should continue from a checkpoint folder." , ) parser.add_argument( "--partial_train_epoch" , type=__snake_case , default=__snake_case , help="If passed, the training will stop after this number of epochs." , ) parser.add_argument( "--num_epochs" , type=__snake_case , default=2 , help="Number of train epochs." , ) snake_case__ :Any = parser.parse_args() snake_case__ :int = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} training_function(__snake_case , __snake_case ) if __name__ == "__main__": main()
57
1
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class _snake_case ( unittest.TestCase ): def lowerCAmelCase_ ( self ) -> List[Any]: snake_case__ :str = tempfile.mkdtemp() snake_case__ :Dict = BlipImageProcessor() snake_case__ :Optional[Any] = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" ) snake_case__ :Optional[Any] = BlipProcessor(UpperCamelCase ,UpperCamelCase ) processor.save_pretrained(self.tmpdirname ) def lowerCAmelCase_ ( self ,**UpperCamelCase ) -> Tuple: return AutoProcessor.from_pretrained(self.tmpdirname ,**UpperCamelCase ).tokenizer def lowerCAmelCase_ ( self ,**UpperCamelCase ) -> Optional[int]: return AutoProcessor.from_pretrained(self.tmpdirname ,**UpperCamelCase ).image_processor def lowerCAmelCase_ ( self ) -> List[Any]: shutil.rmtree(self.tmpdirname ) def lowerCAmelCase_ ( self ) -> int: snake_case__ :Optional[int] = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )] snake_case__ :Union[str, Any] = [Image.fromarray(np.moveaxis(UpperCamelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def lowerCAmelCase_ ( self ) -> List[str]: snake_case__ :Optional[Any] = BlipProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) snake_case__ :str = self.get_tokenizer(bos_token="(BOS)" ,eos_token="(EOS)" ) snake_case__ :str = self.get_image_processor(do_normalize=UpperCamelCase ,padding_value=1.0 ) snake_case__ :List[Any] = BlipProcessor.from_pretrained( self.tmpdirname ,bos_token="(BOS)" ,eos_token="(EOS)" ,do_normalize=UpperCamelCase ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,UpperCamelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,UpperCamelCase ) def lowerCAmelCase_ ( self ) -> Optional[Any]: snake_case__ :Union[str, Any] = self.get_image_processor() snake_case__ :Optional[int] = self.get_tokenizer() snake_case__ :List[str] = BlipProcessor(tokenizer=UpperCamelCase ,image_processor=UpperCamelCase ) snake_case__ :int = self.prepare_image_inputs() snake_case__ :Any = image_processor(UpperCamelCase ,return_tensors="np" ) snake_case__ :str = processor(images=UpperCamelCase ,return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 ) def lowerCAmelCase_ ( self ) -> str: snake_case__ :Union[str, Any] = self.get_image_processor() snake_case__ :List[str] = self.get_tokenizer() snake_case__ :str = BlipProcessor(tokenizer=UpperCamelCase ,image_processor=UpperCamelCase ) snake_case__ :int = "lower newer" snake_case__ :Dict = processor(text=UpperCamelCase ) snake_case__ :Dict = tokenizer(UpperCamelCase ,return_token_type_ids=UpperCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def lowerCAmelCase_ ( self ) -> str: snake_case__ :Optional[int] = self.get_image_processor() snake_case__ :Optional[Any] = self.get_tokenizer() snake_case__ :str = BlipProcessor(tokenizer=UpperCamelCase ,image_processor=UpperCamelCase ) snake_case__ :Optional[Any] = "lower newer" snake_case__ :List[Any] = self.prepare_image_inputs() snake_case__ :str = processor(text=UpperCamelCase ,images=UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,["pixel_values", "input_ids", "attention_mask"] ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase ): processor() def lowerCAmelCase_ ( self ) -> Tuple: snake_case__ :Optional[int] = self.get_image_processor() snake_case__ :Dict = self.get_tokenizer() snake_case__ :Optional[int] = BlipProcessor(tokenizer=UpperCamelCase ,image_processor=UpperCamelCase ) snake_case__ :Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] snake_case__ :str = processor.batch_decode(UpperCamelCase ) snake_case__ :Optional[Any] = tokenizer.batch_decode(UpperCamelCase ) self.assertListEqual(UpperCamelCase ,UpperCamelCase ) def lowerCAmelCase_ ( self ) -> Dict: snake_case__ :Union[str, Any] = self.get_image_processor() snake_case__ :str = self.get_tokenizer() snake_case__ :int = BlipProcessor(tokenizer=UpperCamelCase ,image_processor=UpperCamelCase ) snake_case__ :Optional[Any] = "lower newer" snake_case__ :List[str] = self.prepare_image_inputs() snake_case__ :Any = processor(text=UpperCamelCase ,images=UpperCamelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) ,["pixel_values", "input_ids", "attention_mask"] )
57
from __future__ import annotations class _snake_case : def __init__( self ,UpperCamelCase ) -> None: snake_case__ :Union[str, Any] = data snake_case__ :Node | None = None snake_case__ :Node | None = None def lowercase_ ( __snake_case : Node | None ) -> None: # In Order traversal of the tree '''simple docstring''' if tree: display(tree.left ) print(tree.data ) display(tree.right ) def lowercase_ ( __snake_case : Node | None ) -> int: '''simple docstring''' return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0 def lowercase_ ( __snake_case : Node ) -> bool: '''simple docstring''' if not tree: return True if tree.left and tree.right: return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right ) else: return not tree.left and not tree.right def lowercase_ ( ) -> None: # Main function for testing. '''simple docstring''' snake_case__ :Dict = Node(1 ) snake_case__ :int = Node(2 ) snake_case__ :Optional[Any] = Node(3 ) snake_case__ :Tuple = Node(4 ) snake_case__ :str = Node(5 ) snake_case__ :Optional[Any] = Node(6 ) snake_case__ :List[Any] = Node(7 ) snake_case__ :List[str] = Node(8 ) snake_case__ :Tuple = Node(9 ) print(is_full_binary_tree(__snake_case ) ) print(depth_of_tree(__snake_case ) ) print("Tree is: " ) display(__snake_case ) if __name__ == "__main__": main()
57
1
def lowercase_ ( __snake_case : int ) -> list[int]: '''simple docstring''' if num <= 0: raise ValueError("Input must be a positive integer" ) snake_case__ :int = [True] * (num + 1) snake_case__ :Dict = 2 while p * p <= num: if primes[p]: for i in range(p * p , num + 1 , __snake_case ): snake_case__ :Optional[int] = False p += 1 return [prime for prime in range(2 , num + 1 ) if primes[prime]] if __name__ == "__main__": import doctest doctest.testmod() __UpperCAmelCase : Optional[int] = int(input("Enter a positive integer: ").strip()) print(prime_sieve_eratosthenes(user_num))
57
import os try: from .build_directory_md import good_file_paths except ImportError: from build_directory_md import good_file_paths # type: ignore __UpperCAmelCase : List[Any] = list(good_file_paths()) assert filepaths, "good_file_paths() failed!" __UpperCAmelCase : int = [file for file in filepaths if file != file.lower()] if upper_files: print(F'''{len(upper_files)} files contain uppercase characters:''') print("\n".join(upper_files) + "\n") __UpperCAmelCase : Any = [file for file in filepaths if " " in file] if space_files: print(F'''{len(space_files)} files contain space characters:''') print("\n".join(space_files) + "\n") __UpperCAmelCase : str = [file for file in filepaths if "-" in file] if hyphen_files: print(F'''{len(hyphen_files)} files contain hyphen characters:''') print("\n".join(hyphen_files) + "\n") __UpperCAmelCase : Dict = [file for file in filepaths if os.sep not in file] if nodir_files: print(F'''{len(nodir_files)} files are not in a directory:''') print("\n".join(nodir_files) + "\n") __UpperCAmelCase : int = len(upper_files + space_files + hyphen_files + nodir_files) if bad_files: import sys sys.exit(bad_files)
57
1
from collections import defaultdict from pathlib import Path import pandas as pd from rouge_cli import calculate_rouge_path from utils import calculate_rouge __UpperCAmelCase : Union[str, Any] = [ "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the" " final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe" " depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.", "The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal" " accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's" " founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the" " body.", "Amnesty International releases its annual report on the death penalty. The report catalogs the use of" " state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the" " world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital" " punishment.", ] __UpperCAmelCase : Dict = [ "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ." " Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz" " had informed his Lufthansa training school of an episode of severe depression, airline says .", "Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ." " Israel and the United States opposed the move, which could open the door to war crimes investigations against" " Israelis .", "Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to" " death . Organization claims that governments around the world are using the threat of terrorism to advance" " executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death" " sentences up by 28% .", ] def lowercase_ ( ) -> str: '''simple docstring''' snake_case__ :List[str] = calculate_rouge(__snake_case , __snake_case , bootstrap_aggregation=__snake_case , rouge_keys=["rouge2", "rougeL"] ) assert isinstance(__snake_case , __snake_case ) snake_case__ :Optional[int] = calculate_rouge(__snake_case , __snake_case , bootstrap_aggregation=__snake_case , rouge_keys=["rouge2"] ) assert ( pd.DataFrame(no_aggregation["rouge2"] ).fmeasure.mean() == pd.DataFrame(no_aggregation_just_ra["rouge2"] ).fmeasure.mean() ) def lowercase_ ( ) -> Any: '''simple docstring''' snake_case__ :List[Any] = "rougeLsum" snake_case__ :Optional[Any] = calculate_rouge(__snake_case , __snake_case , newline_sep=__snake_case , rouge_keys=[k] )[k] snake_case__ :Optional[Any] = calculate_rouge(__snake_case , __snake_case , newline_sep=__snake_case , rouge_keys=[k] )[k] assert score > score_no_sep def lowercase_ ( ) -> str: '''simple docstring''' snake_case__ :str = ["rouge1", "rouge2", "rougeL"] snake_case__ :Optional[Any] = calculate_rouge(__snake_case , __snake_case , newline_sep=__snake_case , rouge_keys=__snake_case ) snake_case__ :Union[str, Any] = calculate_rouge(__snake_case , __snake_case , newline_sep=__snake_case , rouge_keys=__snake_case ) assert score_sep == score_no_sep def lowercase_ ( ) -> Optional[Any]: '''simple docstring''' snake_case__ :List[str] = [ "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.", "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .", ] snake_case__ :Optional[Any] = [ "Margot Frank, died in 1945, a month earlier than previously thought.", "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of" " the final seconds on board Flight 9525.", ] assert calculate_rouge(__snake_case , __snake_case , newline_sep=__snake_case ) == calculate_rouge(__snake_case , __snake_case , newline_sep=__snake_case ) def lowercase_ ( ) -> List[Any]: '''simple docstring''' snake_case__ :Optional[Any] = [ "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" " ] snake_case__ :Optional[int] = [ " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ." ] snake_case__ :Any = calculate_rouge(__snake_case , __snake_case , rouge_keys=["rougeLsum"] , newline_sep=__snake_case )["rougeLsum"] snake_case__ :Union[str, Any] = calculate_rouge(__snake_case , __snake_case , rouge_keys=["rougeLsum"] )["rougeLsum"] assert new_score > prev_score def lowercase_ ( ) -> Optional[Any]: '''simple docstring''' snake_case__ :int = Path("examples/seq2seq/test_data/wmt_en_ro" ) snake_case__ :Any = calculate_rouge_path(data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) ) assert isinstance(__snake_case , __snake_case ) snake_case__ :List[str] = calculate_rouge_path( data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) , bootstrap_aggregation=__snake_case ) assert isinstance(__snake_case , __snake_case )
57
def lowercase_ ( __snake_case : Tuple , __snake_case : Optional[int] ) -> List[Any]: '''simple docstring''' snake_case__ :Dict = "" for i in table: res += inp[i - 1] return res def lowercase_ ( __snake_case : List[str] ) -> int: '''simple docstring''' return data[1:] + data[0] def lowercase_ ( __snake_case : int , __snake_case : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' snake_case__ :Union[str, Any] = "" for i in range(len(__snake_case ) ): if a[i] == b[i]: res += "0" else: res += "1" return res def lowercase_ ( __snake_case : Optional[int] , __snake_case : Dict ) -> Union[str, Any]: '''simple docstring''' snake_case__ :int = int("0b" + data[0] + data[-1] , 2 ) snake_case__ :Union[str, Any] = int("0b" + data[1:3] , 2 ) return bin(s[row][col] )[2:] def lowercase_ ( __snake_case : Dict , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : List[Any] , __snake_case : Optional[int] ) -> List[str]: '''simple docstring''' snake_case__ :Tuple = message[:4] snake_case__ :int = message[4:] snake_case__ :int = apply_table(__snake_case , __snake_case ) snake_case__ :Union[str, Any] = xor(__snake_case , __snake_case ) snake_case__ :Tuple = apply_sbox(__snake_case , temp[:4] ) # noqa: E741 snake_case__ :List[str] = apply_sbox(__snake_case , temp[4:] ) snake_case__ :int = "0" * (2 - len(__snake_case )) + l # noqa: E741 snake_case__ :int = "0" * (2 - len(__snake_case )) + r snake_case__ :Optional[Any] = apply_table(l + r , __snake_case ) snake_case__ :Tuple = xor(__snake_case , __snake_case ) return temp + right if __name__ == "__main__": __UpperCAmelCase : Dict = input("Enter 10 bit key: ") __UpperCAmelCase : Tuple = input("Enter 8 bit message: ") __UpperCAmelCase : Any = [6, 3, 7, 4, 8, 5, 1_0, 9] __UpperCAmelCase : List[str] = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6] __UpperCAmelCase : Tuple = [2, 4, 3, 1] __UpperCAmelCase : List[Any] = [2, 6, 3, 1, 4, 8, 5, 7] __UpperCAmelCase : Optional[Any] = [4, 1, 3, 5, 7, 2, 8, 6] __UpperCAmelCase : Optional[int] = [4, 1, 2, 3, 2, 3, 4, 1] __UpperCAmelCase : List[Any] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]] __UpperCAmelCase : Union[str, Any] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]] # key generation __UpperCAmelCase : int = apply_table(key, paa_table) __UpperCAmelCase : Dict = temp[:5] __UpperCAmelCase : Optional[int] = temp[5:] __UpperCAmelCase : Optional[int] = left_shift(left) __UpperCAmelCase : Union[str, Any] = left_shift(right) __UpperCAmelCase : int = apply_table(left + right, pa_table) __UpperCAmelCase : Tuple = left_shift(left) __UpperCAmelCase : Union[str, Any] = left_shift(right) __UpperCAmelCase : Dict = left_shift(left) __UpperCAmelCase : Optional[Any] = left_shift(right) __UpperCAmelCase : Optional[int] = apply_table(left + right, pa_table) # encryption __UpperCAmelCase : Tuple = apply_table(message, IP) __UpperCAmelCase : Tuple = function(expansion, sa, sa, keya, temp) __UpperCAmelCase : List[Any] = temp[4:] + temp[:4] __UpperCAmelCase : int = function(expansion, sa, sa, keya, temp) __UpperCAmelCase : Union[str, Any] = apply_table(temp, IP_inv) print("Cipher text is:", CT) # decryption __UpperCAmelCase : List[Any] = apply_table(CT, IP) __UpperCAmelCase : List[Any] = function(expansion, sa, sa, keya, temp) __UpperCAmelCase : int = temp[4:] + temp[:4] __UpperCAmelCase : Union[str, Any] = function(expansion, sa, sa, keya, temp) __UpperCAmelCase : Union[str, Any] = apply_table(temp, IP_inv) print("Plain text after decypting is:", PT)
57
1
import gc import random import unittest import numpy as np import torch from transformers import XLMRobertaTokenizer from diffusers import ( AltDiffusionImgaImgPipeline, AutoencoderKL, PNDMScheduler, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class _snake_case ( unittest.TestCase ): def lowerCAmelCase_ ( self ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def lowerCAmelCase_ ( self ) -> Dict: snake_case__ :Optional[Any] = 1 snake_case__ :Dict = 3 snake_case__ :int = (32, 32) snake_case__ :Optional[int] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(UpperCamelCase ) return image @property def lowerCAmelCase_ ( self ) -> Any: torch.manual_seed(0 ) snake_case__ :Union[str, Any] = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=32 ,) return model @property def lowerCAmelCase_ ( self ) -> Optional[Any]: torch.manual_seed(0 ) snake_case__ :List[str] = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,) return model @property def lowerCAmelCase_ ( self ) -> Optional[Any]: torch.manual_seed(0 ) snake_case__ :Optional[Any] = RobertaSeriesConfig( hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=5_006 ,) return RobertaSeriesModelWithTransformation(UpperCamelCase ) @property def lowerCAmelCase_ ( self ) -> Union[str, Any]: def extract(*UpperCamelCase ,**UpperCamelCase ): class _snake_case : def __init__( self ) -> int: snake_case__ :Dict = torch.ones([0] ) def lowerCAmelCase_ ( self ,UpperCamelCase ) -> str: self.pixel_values.to(UpperCamelCase ) return self return Out() return extract def lowerCAmelCase_ ( self ) -> str: snake_case__ :Dict = "cpu" # ensure determinism for the device-dependent torch.Generator snake_case__ :Optional[Any] = self.dummy_cond_unet snake_case__ :str = PNDMScheduler(skip_prk_steps=UpperCamelCase ) snake_case__ :Optional[int] = self.dummy_vae snake_case__ :Dict = self.dummy_text_encoder snake_case__ :Optional[Any] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" ) snake_case__ :Optional[Any] = 77 snake_case__ :Dict = self.dummy_image.to(UpperCamelCase ) snake_case__ :List[str] = init_image / 2 + 0.5 # make sure here that pndm scheduler skips prk snake_case__ :List[str] = AltDiffusionImgaImgPipeline( unet=UpperCamelCase ,scheduler=UpperCamelCase ,vae=UpperCamelCase ,text_encoder=UpperCamelCase ,tokenizer=UpperCamelCase ,safety_checker=UpperCamelCase ,feature_extractor=self.dummy_extractor ,) snake_case__ :Tuple = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor ,do_normalize=UpperCamelCase ) snake_case__ :Dict = alt_pipe.to(UpperCamelCase ) alt_pipe.set_progress_bar_config(disable=UpperCamelCase ) snake_case__ :Union[str, Any] = "A painting of a squirrel eating a burger" snake_case__ :int = torch.Generator(device=UpperCamelCase ).manual_seed(0 ) snake_case__ :int = alt_pipe( [prompt] ,generator=UpperCamelCase ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="np" ,image=UpperCamelCase ,) snake_case__ :str = output.images snake_case__ :Optional[int] = torch.Generator(device=UpperCamelCase ).manual_seed(0 ) snake_case__ :List[str] = alt_pipe( [prompt] ,generator=UpperCamelCase ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="np" ,image=UpperCamelCase ,return_dict=UpperCamelCase ,)[0] snake_case__ :Any = image[0, -3:, -3:, -1] snake_case__ :Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) snake_case__ :str = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3 @unittest.skipIf(torch_device != "cuda" ,"This test requires a GPU" ) def lowerCAmelCase_ ( self ) -> str: snake_case__ :Optional[int] = self.dummy_cond_unet snake_case__ :Optional[int] = PNDMScheduler(skip_prk_steps=UpperCamelCase ) snake_case__ :Optional[int] = self.dummy_vae snake_case__ :Tuple = self.dummy_text_encoder snake_case__ :Tuple = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" ) snake_case__ :Union[str, Any] = 77 snake_case__ :Dict = self.dummy_image.to(UpperCamelCase ) # put models in fp16 snake_case__ :Optional[Any] = unet.half() snake_case__ :Dict = vae.half() snake_case__ :Optional[Any] = bert.half() # make sure here that pndm scheduler skips prk snake_case__ :Union[str, Any] = AltDiffusionImgaImgPipeline( unet=UpperCamelCase ,scheduler=UpperCamelCase ,vae=UpperCamelCase ,text_encoder=UpperCamelCase ,tokenizer=UpperCamelCase ,safety_checker=UpperCamelCase ,feature_extractor=self.dummy_extractor ,) snake_case__ :Optional[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor ,do_normalize=UpperCamelCase ) snake_case__ :Dict = alt_pipe.to(UpperCamelCase ) alt_pipe.set_progress_bar_config(disable=UpperCamelCase ) snake_case__ :Optional[Any] = "A painting of a squirrel eating a burger" snake_case__ :int = torch.manual_seed(0 ) snake_case__ :Tuple = alt_pipe( [prompt] ,generator=UpperCamelCase ,num_inference_steps=2 ,output_type="np" ,image=UpperCamelCase ,).images assert image.shape == (1, 32, 32, 3) @unittest.skipIf(torch_device != "cuda" ,"This test requires a GPU" ) def lowerCAmelCase_ ( self ) -> Dict: snake_case__ :List[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) # resize to resolution that is divisible by 8 but not 16 or 32 snake_case__ :List[str] = init_image.resize((760, 504) ) snake_case__ :Any = "BAAI/AltDiffusion" snake_case__ :Union[str, Any] = AltDiffusionImgaImgPipeline.from_pretrained( UpperCamelCase ,safety_checker=UpperCamelCase ,) pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) pipe.enable_attention_slicing() snake_case__ :List[Any] = "A fantasy landscape, trending on artstation" snake_case__ :str = torch.manual_seed(0 ) snake_case__ :List[str] = pipe( prompt=UpperCamelCase ,image=UpperCamelCase ,strength=0.75 ,guidance_scale=7.5 ,generator=UpperCamelCase ,output_type="np" ,) snake_case__ :Optional[int] = output.images[0] snake_case__ :str = image[255:258, 383:386, -1] assert image.shape == (504, 760, 3) snake_case__ :str = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class _snake_case ( unittest.TestCase ): def lowerCAmelCase_ ( self ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self ) -> Optional[Any]: snake_case__ :Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) snake_case__ :Dict = init_image.resize((768, 512) ) snake_case__ :Tuple = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" ) snake_case__ :Optional[int] = "BAAI/AltDiffusion" snake_case__ :List[Any] = AltDiffusionImgaImgPipeline.from_pretrained( UpperCamelCase ,safety_checker=UpperCamelCase ,) pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) pipe.enable_attention_slicing() snake_case__ :Any = "A fantasy landscape, trending on artstation" snake_case__ :List[Any] = torch.manual_seed(0 ) snake_case__ :str = pipe( prompt=UpperCamelCase ,image=UpperCamelCase ,strength=0.75 ,guidance_scale=7.5 ,generator=UpperCamelCase ,output_type="np" ,) snake_case__ :Optional[Any] = output.images[0] assert image.shape == (512, 768, 3) # img2img is flaky across GPUs even in fp32, so using MAE here assert np.abs(expected_image - image ).max() < 1E-2
57
import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class _snake_case ( _A , _A , _A ): @register_to_config def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = False ,) -> int: super().__init__() snake_case__ :Union[str, Any] = nn.Embedding(UpperCamelCase ,UpperCamelCase ) snake_case__ :int = nn.Embedding(UpperCamelCase ,UpperCamelCase ) snake_case__ :Any = False snake_case__ :List[Any] = nn.Dropout(p=UpperCamelCase ) snake_case__ :Tuple = TaConfig( vocab_size=UpperCamelCase ,d_model=UpperCamelCase ,num_heads=UpperCamelCase ,d_kv=UpperCamelCase ,d_ff=UpperCamelCase ,dropout_rate=UpperCamelCase ,feed_forward_proj=UpperCamelCase ,is_decoder=UpperCamelCase ,is_encoder_decoder=UpperCamelCase ,) snake_case__ :List[str] = nn.ModuleList() for lyr_num in range(UpperCamelCase ): snake_case__ :List[Any] = TaBlock(UpperCamelCase ) self.encoders.append(UpperCamelCase ) snake_case__ :Optional[Any] = TaLayerNorm(UpperCamelCase ) snake_case__ :Any = nn.Dropout(p=UpperCamelCase ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> int: snake_case__ :str = self.token_embedder(UpperCamelCase ) snake_case__ :int = encoder_input_tokens.shape[1] snake_case__ :List[Any] = torch.arange(UpperCamelCase ,device=encoder_input_tokens.device ) x += self.position_encoding(UpperCamelCase ) snake_case__ :Optional[int] = self.dropout_pre(UpperCamelCase ) # inverted the attention mask snake_case__ :Optional[Any] = encoder_input_tokens.size() snake_case__ :Dict = self.get_extended_attention_mask(UpperCamelCase ,UpperCamelCase ) for lyr in self.encoders: snake_case__ :str = lyr(UpperCamelCase ,UpperCamelCase )[0] snake_case__ :List[Any] = self.layer_norm(UpperCamelCase ) return self.dropout_post(UpperCamelCase ), encoder_inputs_mask
57
1
import argparse import torch from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert from transformers.utils import logging logging.set_verbosity_info() def lowercase_ ( __snake_case : int , __snake_case : str , __snake_case : Union[str, Any] ) -> Optional[int]: '''simple docstring''' snake_case__ :Tuple = RemBertConfig.from_json_file(__snake_case ) print("Building PyTorch model from configuration: {}".format(str(__snake_case ) ) ) snake_case__ :Optional[int] = RemBertModel(__snake_case ) # Load weights from tf checkpoint load_tf_weights_in_rembert(__snake_case , __snake_case , __snake_case ) # Save pytorch-model print("Save PyTorch model to {}".format(__snake_case ) ) torch.save(model.state_dict() , __snake_case ) if __name__ == "__main__": __UpperCAmelCase : int = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--rembert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained RemBERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) __UpperCAmelCase : Dict = parser.parse_args() convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
57
__UpperCAmelCase : int = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []} __UpperCAmelCase : List[str] = ["a", "b", "c", "d", "e"] def lowercase_ ( __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : Tuple ) -> Optional[int]: '''simple docstring''' snake_case__ :List[Any] = start # add current to visited visited.append(__snake_case ) snake_case__ :List[str] = edges[current] for neighbor in neighbors: # if neighbor not in visited, visit if neighbor not in visited: snake_case__ :Any = topological_sort(__snake_case , __snake_case , __snake_case ) # if all neighbors visited add current to sort sort.append(__snake_case ) # if all vertices haven't been visited select a new one to visit if len(__snake_case ) != len(__snake_case ): for vertice in vertices: if vertice not in visited: snake_case__ :Any = topological_sort(__snake_case , __snake_case , __snake_case ) # return sort return sort if __name__ == "__main__": __UpperCAmelCase : Tuple = topological_sort("a", [], []) print(sort)
57
1
from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
57
import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class _snake_case ( unittest.TestCase ): def lowerCAmelCase_ ( self ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() def lowerCAmelCase_ ( self ) -> str: snake_case__ , snake_case__ :Tuple = FlaxControlNetModel.from_pretrained( "lllyasviel/sd-controlnet-canny" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa ) snake_case__ , snake_case__ :Any = FlaxStableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa ) snake_case__ :List[str] = controlnet_params snake_case__ :Union[str, Any] = "bird" snake_case__ :Optional[int] = jax.device_count() snake_case__ :Tuple = pipe.prepare_text_inputs([prompts] * num_samples ) snake_case__ :Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) snake_case__ :str = pipe.prepare_image_inputs([canny_image] * num_samples ) snake_case__ :List[str] = jax.random.PRNGKey(0 ) snake_case__ :str = jax.random.split(UpperCamelCase ,jax.device_count() ) snake_case__ :int = replicate(UpperCamelCase ) snake_case__ :Any = shard(UpperCamelCase ) snake_case__ :Any = shard(UpperCamelCase ) snake_case__ :str = pipe( prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) snake_case__ :List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) snake_case__ :Any = images[0, 253:256, 253:256, -1] snake_case__ :Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case__ :List[Any] = jnp.array( [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] ) print(f'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def lowerCAmelCase_ ( self ) -> Optional[int]: snake_case__ , snake_case__ :List[str] = FlaxControlNetModel.from_pretrained( "lllyasviel/sd-controlnet-openpose" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa ) snake_case__ , snake_case__ :Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa ) snake_case__ :str = controlnet_params snake_case__ :int = "Chef in the kitchen" snake_case__ :List[Any] = jax.device_count() snake_case__ :Dict = pipe.prepare_text_inputs([prompts] * num_samples ) snake_case__ :Any = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" ) snake_case__ :Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples ) snake_case__ :List[str] = jax.random.PRNGKey(0 ) snake_case__ :Any = jax.random.split(UpperCamelCase ,jax.device_count() ) snake_case__ :Dict = replicate(UpperCamelCase ) snake_case__ :Tuple = shard(UpperCamelCase ) snake_case__ :Optional[int] = shard(UpperCamelCase ) snake_case__ :Optional[Any] = pipe( prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) snake_case__ :int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) snake_case__ :List[str] = images[0, 253:256, 253:256, -1] snake_case__ :Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case__ :List[str] = jnp.array( [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] ) print(f'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
57
1
import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __UpperCAmelCase : int = get_tests_dir("fixtures/test_sentencepiece_no_bos.model") @require_sentencepiece @require_tokenizers class _snake_case ( _A , unittest.TestCase ): _A = PegasusTokenizer _A = PegasusTokenizerFast _A = True _A = True def lowerCAmelCase_ ( self ) -> Tuple: super().setUp() # We have a SentencePiece fixture for testing snake_case__ :List[Any] = PegasusTokenizer(UpperCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowerCAmelCase_ ( self ) -> int: return PegasusTokenizer.from_pretrained("google/pegasus-large" ) def lowerCAmelCase_ ( self ,**UpperCamelCase ) -> PegasusTokenizer: return PegasusTokenizer.from_pretrained(self.tmpdirname ,**UpperCamelCase ) def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Optional[Any]: return ("This is a test", "This is a test") def lowerCAmelCase_ ( self ) -> int: snake_case__ :int = "</s>" snake_case__ :str = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) ,UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) ,UpperCamelCase ) def lowerCAmelCase_ ( self ) -> List[str]: snake_case__ :Tuple = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"<pad>" ) self.assertEqual(vocab_keys[1] ,"</s>" ) self.assertEqual(vocab_keys[-1] ,"v" ) self.assertEqual(len(UpperCamelCase ) ,1_103 ) def lowerCAmelCase_ ( self ) -> Optional[Any]: self.assertEqual(self.get_tokenizer().vocab_size ,1_103 ) def lowerCAmelCase_ ( self ) -> Any: snake_case__ :List[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) snake_case__ :Dict = self.tokenizer_class.from_pretrained(self.tmpdirname ) snake_case__ :int = ( "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important" " </s> <pad> <pad> <pad>" ) snake_case__ :List[str] = rust_tokenizer([raw_input_str] ,return_tensors=UpperCamelCase ,add_special_tokens=UpperCamelCase ).input_ids[0] snake_case__ :Tuple = py_tokenizer([raw_input_str] ,return_tensors=UpperCamelCase ,add_special_tokens=UpperCamelCase ).input_ids[0] self.assertListEqual(UpperCamelCase ,UpperCamelCase ) def lowerCAmelCase_ ( self ) -> Optional[Any]: snake_case__ :Optional[int] = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word snake_case__ :Optional[int] = "<mask_1> To ensure a <mask_2> flow of bank resolutions." snake_case__ :Optional[Any] = [2, 413, 615, 114, 3, 1_971, 113, 1_679, 10_710, 107, 1] snake_case__ :Optional[Any] = tokenizer([raw_input_str] ,return_tensors=UpperCamelCase ).input_ids[0] self.assertListEqual(UpperCamelCase ,UpperCamelCase ) def lowerCAmelCase_ ( self ) -> Any: snake_case__ :Optional[int] = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 96_103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1_024 snake_case__ :List[Any] = "To ensure a smooth flow of bank resolutions." snake_case__ :int = [413, 615, 114, 2_291, 1_971, 113, 1_679, 10_710, 107, 1] snake_case__ :Optional[Any] = tokenizer([raw_input_str] ,return_tensors=UpperCamelCase ).input_ids[0] self.assertListEqual(UpperCamelCase ,UpperCamelCase ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def lowerCAmelCase_ ( self ) -> List[str]: snake_case__ :List[str] = ["This is going to be way too long." * 150, "short example"] snake_case__ :Dict = ["not super long but more than 5 tokens", "tiny"] snake_case__ :Tuple = self._large_tokenizer(UpperCamelCase ,padding=UpperCamelCase ,truncation=UpperCamelCase ,return_tensors="pt" ) snake_case__ :Any = self._large_tokenizer( text_target=UpperCamelCase ,max_length=5 ,padding=UpperCamelCase ,truncation=UpperCamelCase ,return_tensors="pt" ) assert batch.input_ids.shape == (2, 1_024) assert batch.attention_mask.shape == (2, 1_024) assert targets["input_ids"].shape == (2, 5) assert len(UpperCamelCase ) == 2 # input_ids, attention_mask. @slow def lowerCAmelCase_ ( self ) -> str: # fmt: off snake_case__ :Union[str, Any] = {"input_ids": [[38_979, 143, 18_485, 606, 130, 26_669, 87_686, 121, 54_189, 1_129, 111, 26_669, 87_686, 121, 9_114, 14_787, 121, 13_249, 158, 592, 956, 121, 14_621, 31_576, 143, 62_613, 108, 9_688, 930, 43_430, 11_562, 62_613, 304, 108, 11_443, 897, 108, 9_314, 17_415, 63_399, 108, 11_443, 7_614, 18_316, 118, 4_284, 7_148, 12_430, 143, 1_400, 25_703, 158, 111, 4_284, 7_148, 11_772, 143, 21_297, 1_064, 158, 122, 204, 3_506, 1_754, 1_133, 14_787, 1_581, 115, 33_224, 4_482, 111, 1_355, 110, 29_173, 317, 50_833, 108, 20_147, 94_665, 111, 77_198, 107, 1], [110, 62_613, 117, 638, 112, 1_133, 121, 20_098, 1_355, 79_050, 13_872, 135, 1_596, 53_541, 1_352, 141, 13_039, 5_542, 124, 302, 518, 111, 268, 2_956, 115, 149, 4_427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1_235, 2_799, 18_289, 17_780, 204, 109, 9_474, 1_296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase ,model_name="google/bigbird-pegasus-large-arxiv" ,revision="ba85d0851d708441f91440d509690f1ab6353415" ,) @require_sentencepiece @require_tokenizers class _snake_case ( _A , unittest.TestCase ): _A = PegasusTokenizer _A = PegasusTokenizerFast _A = True _A = True def lowerCAmelCase_ ( self ) -> List[Any]: super().setUp() # We have a SentencePiece fixture for testing snake_case__ :Any = PegasusTokenizer(UpperCamelCase ,offset=0 ,mask_token_sent=UpperCamelCase ,mask_token="[MASK]" ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowerCAmelCase_ ( self ) -> Union[str, Any]: return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv" ) def lowerCAmelCase_ ( self ,**UpperCamelCase ) -> PegasusTokenizer: return PegasusTokenizer.from_pretrained(self.tmpdirname ,**UpperCamelCase ) def lowerCAmelCase_ ( self ,UpperCamelCase ) -> str: return ("This is a test", "This is a test") def lowerCAmelCase_ ( self ) -> Any: snake_case__ :Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) snake_case__ :int = self.tokenizer_class.from_pretrained(self.tmpdirname ) snake_case__ :Optional[int] = ( "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>" " <pad> <pad> <pad>" ) snake_case__ :str = rust_tokenizer([raw_input_str] ,return_tensors=UpperCamelCase ,add_special_tokens=UpperCamelCase ).input_ids[0] snake_case__ :int = py_tokenizer([raw_input_str] ,return_tensors=UpperCamelCase ,add_special_tokens=UpperCamelCase ).input_ids[0] self.assertListEqual(UpperCamelCase ,UpperCamelCase ) @require_torch def lowerCAmelCase_ ( self ) -> Dict: snake_case__ :List[str] = ["This is going to be way too long." * 1_000, "short example"] snake_case__ :List[str] = ["not super long but more than 5 tokens", "tiny"] snake_case__ :Dict = self._large_tokenizer(UpperCamelCase ,padding=UpperCamelCase ,truncation=UpperCamelCase ,return_tensors="pt" ) snake_case__ :Dict = self._large_tokenizer( text_target=UpperCamelCase ,max_length=5 ,padding=UpperCamelCase ,truncation=UpperCamelCase ,return_tensors="pt" ) assert batch.input_ids.shape == (2, 4_096) assert batch.attention_mask.shape == (2, 4_096) assert targets["input_ids"].shape == (2, 5) assert len(UpperCamelCase ) == 2 # input_ids, attention_mask. def lowerCAmelCase_ ( self ) -> int: snake_case__ :Optional[int] = ( "This is an example string that is used to test the original TF implementation against the HF" " implementation" ) snake_case__ :Union[str, Any] = self._large_tokenizer(UpperCamelCase ).input_ids self.assertListEqual( UpperCamelCase ,[182, 117, 142, 587, 4_211, 120, 117, 263, 112, 804, 109, 856, 25_016, 3_137, 464, 109, 26_955, 3_137, 1] ,)
57
def lowercase_ ( __snake_case : list ) -> list: '''simple docstring''' if any(not isinstance(__snake_case , __snake_case ) or x < 0 for x in sequence ): raise TypeError("Sequence must be list of non-negative integers" ) for _ in range(len(__snake_case ) ): for i, (rod_upper, rod_lower) in enumerate(zip(__snake_case , sequence[1:] ) ): if rod_upper > rod_lower: sequence[i] -= rod_upper - rod_lower sequence[i + 1] += rod_upper - rod_lower return sequence if __name__ == "__main__": assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
57
1
import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 __UpperCAmelCase : Tuple = data_utils.TransfoXLTokenizer __UpperCAmelCase : Union[str, Any] = data_utils.TransfoXLCorpus __UpperCAmelCase : Optional[Any] = data_utils __UpperCAmelCase : Optional[Any] = data_utils def lowercase_ ( __snake_case : List[str] , __snake_case : Any , __snake_case : Optional[Any] , __snake_case : List[Any] ) -> Union[str, Any]: '''simple docstring''' if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(__snake_case , "rb" ) as fp: snake_case__ :Optional[int] = pickle.load(__snake_case , encoding="latin1" ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) snake_case__ :Tuple = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"] print(F'Save vocabulary to {pytorch_vocab_dump_path}' ) snake_case__ :Union[str, Any] = corpus.vocab.__dict__ torch.save(__snake_case , __snake_case ) snake_case__ :str = corpus.__dict__ corpus_dict_no_vocab.pop("vocab" , __snake_case ) snake_case__ :str = pytorch_dump_folder_path + "/" + CORPUS_NAME print(F'Save dataset to {pytorch_dataset_dump_path}' ) torch.save(__snake_case , __snake_case ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model snake_case__ :Tuple = os.path.abspath(__snake_case ) snake_case__ :int = os.path.abspath(__snake_case ) print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' ) # Initialise PyTorch model if transfo_xl_config_file == "": snake_case__ :Dict = TransfoXLConfig() else: snake_case__ :List[Any] = TransfoXLConfig.from_json_file(__snake_case ) print(F'Building PyTorch model from configuration: {config}' ) snake_case__ :str = TransfoXLLMHeadModel(__snake_case ) snake_case__ :Any = load_tf_weights_in_transfo_xl(__snake_case , __snake_case , __snake_case ) # Save pytorch-model snake_case__ :Dict = os.path.join(__snake_case , __snake_case ) snake_case__ :int = os.path.join(__snake_case , __snake_case ) print(F'Save PyTorch model to {os.path.abspath(__snake_case )}' ) torch.save(model.state_dict() , __snake_case ) print(F'Save configuration file to {os.path.abspath(__snake_case )}' ) with open(__snake_case , "w" , encoding="utf-8" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __UpperCAmelCase : Dict = argparse.ArgumentParser() parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the folder to store the PyTorch model or dataset/vocab.", ) parser.add_argument( "--tf_checkpoint_path", default="", type=str, help="An optional path to a TensorFlow checkpoint path to be converted.", ) parser.add_argument( "--transfo_xl_config_file", default="", type=str, help=( "An optional config json file corresponding to the pre-trained BERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--transfo_xl_dataset_file", default="", type=str, help="An optional dataset file to be converted in a vocabulary.", ) __UpperCAmelCase : Tuple = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
57
from __future__ import annotations def lowercase_ ( __snake_case : list ) -> float: '''simple docstring''' if not nums: raise ValueError("List is empty" ) return sum(__snake_case ) / len(__snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
57
1
import argparse import os import re import packaging.version __UpperCAmelCase : Dict = "examples/" __UpperCAmelCase : int = { "examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"), "init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"), "setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","), "doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"), } __UpperCAmelCase : Any = { "init": "src/diffusers/__init__.py", "setup": "setup.py", } __UpperCAmelCase : Dict = "README.md" def lowercase_ ( __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Tuple ) -> Any: '''simple docstring''' with open(__snake_case , "r" , encoding="utf-8" , newline="\n" ) as f: snake_case__ :Optional[int] = f.read() snake_case__ , snake_case__ :Optional[Any] = REPLACE_PATTERNS[pattern] snake_case__ :Dict = replace.replace("VERSION" , __snake_case ) snake_case__ :Union[str, Any] = re_pattern.sub(__snake_case , __snake_case ) with open(__snake_case , "w" , encoding="utf-8" , newline="\n" ) as f: f.write(__snake_case ) def lowercase_ ( __snake_case : Any ) -> int: '''simple docstring''' for folder, directories, fnames in os.walk(__snake_case ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("research_projects" ) if "legacy" in directories: directories.remove("legacy" ) for fname in fnames: if fname.endswith(".py" ): update_version_in_file(os.path.join(__snake_case , __snake_case ) , __snake_case , pattern="examples" ) def lowercase_ ( __snake_case : Optional[int] , __snake_case : str=False ) -> int: '''simple docstring''' for pattern, fname in REPLACE_FILES.items(): update_version_in_file(__snake_case , __snake_case , __snake_case ) if not patch: update_version_in_examples(__snake_case ) def lowercase_ ( ) -> str: '''simple docstring''' snake_case__ :int = "🤗 Transformers currently provides the following architectures" snake_case__ :List[Any] = "1. Want to contribute a new model?" with open(__snake_case , "r" , encoding="utf-8" , newline="\n" ) as f: snake_case__ :str = f.readlines() # Find the start of the list. snake_case__ :Dict = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 snake_case__ :List[str] = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith("1." ): snake_case__ :Union[str, Any] = lines[index].replace( "https://huggingface.co/docs/diffusers/main/model_doc" , "https://huggingface.co/docs/diffusers/model_doc" , ) index += 1 with open(__snake_case , "w" , encoding="utf-8" , newline="\n" ) as f: f.writelines(__snake_case ) def lowercase_ ( ) -> Dict: '''simple docstring''' with open(REPLACE_FILES["init"] , "r" ) as f: snake_case__ :Optional[Any] = f.read() snake_case__ :int = REPLACE_PATTERNS["init"][0].search(__snake_case ).groups()[0] return packaging.version.parse(__snake_case ) def lowercase_ ( __snake_case : List[str]=False ) -> Tuple: '''simple docstring''' snake_case__ :Dict = get_version() if patch and default_version.is_devrelease: raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" ) if default_version.is_devrelease: snake_case__ :List[str] = default_version.base_version elif patch: snake_case__ :Optional[Any] = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}' else: snake_case__ :List[str] = F'{default_version.major}.{default_version.minor + 1}.0' # Now let's ask nicely if that's the right one. snake_case__ :List[str] = input(F'Which version are you releasing? [{default_version}]' ) if len(__snake_case ) == 0: snake_case__ :Optional[Any] = default_version print(F'Updating version to {version}.' ) global_version_update(__snake_case , patch=__snake_case ) def lowercase_ ( ) -> str: '''simple docstring''' snake_case__ :Dict = get_version() snake_case__ :Optional[Any] = F'{current_version.major}.{current_version.minor + 1}.0.dev0' snake_case__ :int = current_version.base_version # Check with the user we got that right. snake_case__ :Union[str, Any] = input(F'Which version are we developing now? [{dev_version}]' ) if len(__snake_case ) == 0: snake_case__ :Tuple = dev_version print(F'Updating version to {version}.' ) global_version_update(__snake_case ) # print("Cleaning main README, don't forget to run `make fix-copies`.") # clean_main_ref_in_model_list() if __name__ == "__main__": __UpperCAmelCase : str = argparse.ArgumentParser() parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.") parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.") __UpperCAmelCase : Union[str, Any] = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("Nothing to do after a patch :-)") else: post_release_work()
57
from __future__ import annotations import math def lowercase_ ( __snake_case : int , __snake_case : int , __snake_case : bool , __snake_case : list[int] , __snake_case : float ) -> int: '''simple docstring''' if depth < 0: raise ValueError("Depth cannot be less than 0" ) if len(__snake_case ) == 0: raise ValueError("Scores cannot be empty" ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) , ) return min( minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) , ) def lowercase_ ( ) -> None: '''simple docstring''' snake_case__ :List[Any] = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23] snake_case__ :int = math.log(len(__snake_case ) , 2 ) print("Optimal value : " , end="" ) print(minimax(0 , 0 , __snake_case , __snake_case , __snake_case ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
57
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCAmelCase : int = { "configuration_x_clip": [ "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "XCLIPConfig", "XCLIPTextConfig", "XCLIPVisionConfig", ], "processing_x_clip": ["XCLIPProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : List[str] = [ "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "XCLIPModel", "XCLIPPreTrainedModel", "XCLIPTextModel", "XCLIPVisionModel", ] if TYPE_CHECKING: from .configuration_x_clip import ( XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, XCLIPConfig, XCLIPTextConfig, XCLIPVisionConfig, ) from .processing_x_clip import XCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_x_clip import ( XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, XCLIPModel, XCLIPPreTrainedModel, XCLIPTextModel, XCLIPVisionModel, ) else: import sys __UpperCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
57
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __UpperCAmelCase : Optional[Any] = logging.get_logger(__name__) def lowercase_ ( __snake_case : Any , __snake_case : Any ) -> Any: '''simple docstring''' snake_case__ :Optional[Any] = b.T snake_case__ :Optional[Any] = np.sum(np.square(__snake_case ) , axis=1 ) snake_case__ :Tuple = np.sum(np.square(__snake_case ) , axis=0 ) snake_case__ :Union[str, Any] = np.matmul(__snake_case , __snake_case ) snake_case__ :Union[str, Any] = aa[:, None] - 2 * ab + ba[None, :] return d def lowercase_ ( __snake_case : Optional[Any] , __snake_case : int ) -> Any: '''simple docstring''' snake_case__ :Optional[Any] = x.reshape(-1 , 3 ) snake_case__ :List[str] = squared_euclidean_distance(__snake_case , __snake_case ) return np.argmin(__snake_case , axis=1 ) class _snake_case ( _A ): _A = ['pixel_values'] def __init__( self ,UpperCamelCase = None ,UpperCamelCase = True ,UpperCamelCase = None ,UpperCamelCase = PILImageResampling.BILINEAR ,UpperCamelCase = True ,UpperCamelCase = True ,**UpperCamelCase ,) -> None: super().__init__(**UpperCamelCase ) snake_case__ :List[Any] = size if size is not None else {"height": 256, "width": 256} snake_case__ :str = get_size_dict(UpperCamelCase ) snake_case__ :Dict = np.array(UpperCamelCase ) if clusters is not None else None snake_case__ :str = do_resize snake_case__ :List[str] = size snake_case__ :List[Any] = resample snake_case__ :Union[str, Any] = do_normalize snake_case__ :int = do_color_quantize def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = PILImageResampling.BILINEAR ,UpperCamelCase = None ,**UpperCamelCase ,) -> np.ndarray: snake_case__ :List[str] = get_size_dict(UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}' ) return resize( UpperCamelCase ,size=(size["height"], size["width"]) ,resample=UpperCamelCase ,data_format=UpperCamelCase ,**UpperCamelCase ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,) -> np.ndarray: snake_case__ :Tuple = rescale(image=UpperCamelCase ,scale=1 / 127.5 ,data_format=UpperCamelCase ) snake_case__ :List[Any] = image - 1 return image def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = ChannelDimension.FIRST ,**UpperCamelCase ,) -> PIL.Image.Image: snake_case__ :Optional[int] = do_resize if do_resize is not None else self.do_resize snake_case__ :int = size if size is not None else self.size snake_case__ :Tuple = get_size_dict(UpperCamelCase ) snake_case__ :str = resample if resample is not None else self.resample snake_case__ :Dict = do_normalize if do_normalize is not None else self.do_normalize snake_case__ :Tuple = do_color_quantize if do_color_quantize is not None else self.do_color_quantize snake_case__ :List[Any] = clusters if clusters is not None else self.clusters snake_case__ :str = np.array(UpperCamelCase ) snake_case__ :int = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_color_quantize and clusters is None: raise ValueError("Clusters must be specified if do_color_quantize is True." ) # All transformations expect numpy arrays. snake_case__ :Union[str, Any] = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: snake_case__ :int = [self.resize(image=UpperCamelCase ,size=UpperCamelCase ,resample=UpperCamelCase ) for image in images] if do_normalize: snake_case__ :Any = [self.normalize(image=UpperCamelCase ) for image in images] if do_color_quantize: snake_case__ :Optional[Any] = [to_channel_dimension_format(UpperCamelCase ,ChannelDimension.LAST ) for image in images] # color quantize from (batch_size, height, width, 3) to (batch_size, height, width) snake_case__ :Union[str, Any] = np.array(UpperCamelCase ) snake_case__ :Optional[int] = color_quantize(UpperCamelCase ,UpperCamelCase ).reshape(images.shape[:-1] ) # flatten to (batch_size, height*width) snake_case__ :List[Any] = images.shape[0] snake_case__ :str = images.reshape(UpperCamelCase ,-1 ) # We need to convert back to a list of images to keep consistent behaviour across processors. snake_case__ :Any = list(UpperCamelCase ) else: snake_case__ :List[str] = [to_channel_dimension_format(UpperCamelCase ,UpperCamelCase ) for image in images] snake_case__ :List[str] = {"input_ids": images} return BatchFeature(data=UpperCamelCase ,tensor_type=UpperCamelCase )
57
1
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __UpperCAmelCase : Tuple = logging.get_logger(__name__) class _snake_case ( _A ): _A = ['pixel_values'] def __init__( self ,UpperCamelCase = True ,UpperCamelCase = None ,UpperCamelCase = PILImageResampling.BICUBIC ,UpperCamelCase = True ,UpperCamelCase = 1 / 255 ,UpperCamelCase = True ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = True ,**UpperCamelCase ,) -> None: super().__init__(**UpperCamelCase ) snake_case__ :Optional[Any] = size if size is not None else {"height": 384, "width": 384} snake_case__ :Tuple = get_size_dict(UpperCamelCase ,default_to_square=UpperCamelCase ) snake_case__ :int = do_resize snake_case__ :Optional[int] = size snake_case__ :Optional[int] = resample snake_case__ :List[str] = do_rescale snake_case__ :Tuple = rescale_factor snake_case__ :Optional[Any] = do_normalize snake_case__ :Optional[int] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN snake_case__ :List[str] = image_std if image_std is not None else OPENAI_CLIP_STD snake_case__ :Tuple = do_convert_rgb def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = PILImageResampling.BICUBIC ,UpperCamelCase = None ,**UpperCamelCase ,) -> np.ndarray: snake_case__ :Optional[Any] = get_size_dict(UpperCamelCase ,default_to_square=UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}' ) snake_case__ :str = (size["height"], size["width"]) return resize(UpperCamelCase ,size=UpperCamelCase ,resample=UpperCamelCase ,data_format=UpperCamelCase ,**UpperCamelCase ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = None ,**UpperCamelCase ,) -> str: return rescale(UpperCamelCase ,scale=UpperCamelCase ,data_format=UpperCamelCase ,**UpperCamelCase ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = None ,**UpperCamelCase ,) -> np.ndarray: return normalize(UpperCamelCase ,mean=UpperCamelCase ,std=UpperCamelCase ,data_format=UpperCamelCase ,**UpperCamelCase ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = ChannelDimension.FIRST ,**UpperCamelCase ,) -> PIL.Image.Image: snake_case__ :str = do_resize if do_resize is not None else self.do_resize snake_case__ :List[Any] = resample if resample is not None else self.resample snake_case__ :List[Any] = do_rescale if do_rescale is not None else self.do_rescale snake_case__ :Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case__ :Any = do_normalize if do_normalize is not None else self.do_normalize snake_case__ :int = image_mean if image_mean is not None else self.image_mean snake_case__ :Tuple = image_std if image_std is not None else self.image_std snake_case__ :str = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb snake_case__ :Optional[Any] = size if size is not None else self.size snake_case__ :Union[str, Any] = get_size_dict(UpperCamelCase ,default_to_square=UpperCamelCase ) snake_case__ :Optional[Any] = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # PIL RGBA images are converted to RGB if do_convert_rgb: snake_case__ :List[Any] = [convert_to_rgb(UpperCamelCase ) for image in images] # All transformations expect numpy arrays. snake_case__ :str = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: snake_case__ :List[Any] = [self.resize(image=UpperCamelCase ,size=UpperCamelCase ,resample=UpperCamelCase ) for image in images] if do_rescale: snake_case__ :int = [self.rescale(image=UpperCamelCase ,scale=UpperCamelCase ) for image in images] if do_normalize: snake_case__ :Optional[int] = [self.normalize(image=UpperCamelCase ,mean=UpperCamelCase ,std=UpperCamelCase ) for image in images] snake_case__ :str = [to_channel_dimension_format(UpperCamelCase ,UpperCamelCase ) for image in images] snake_case__ :List[Any] = BatchFeature(data={"pixel_values": images} ,tensor_type=UpperCamelCase ) return encoded_outputs
57
import pytest __UpperCAmelCase : int = "__dummy_dataset1__" __UpperCAmelCase : int = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n" @pytest.fixture def lowercase_ ( ) -> Optional[Any]: '''simple docstring''' return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def lowercase_ ( ) -> Optional[int]: '''simple docstring''' return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def lowercase_ ( __snake_case : Optional[int] , __snake_case : List[Any] , __snake_case : Any ) -> Dict: '''simple docstring''' snake_case__ :Optional[Any] = dataset_loading_script_name snake_case__ :Optional[Any] = tmp_path / "datasets" / script_name script_dir.mkdir(parents=__snake_case ) snake_case__ :List[Any] = script_dir / F'{script_name}.py' with open(__snake_case , "w" ) as f: f.write(__snake_case ) return str(__snake_case )
57
1
import argparse import pytorch_lightning as pl import torch from torch import nn from transformers import LongformerForQuestionAnswering, LongformerModel class _snake_case ( pl.LightningModule ): def __init__( self ,UpperCamelCase ) -> Dict: super().__init__() snake_case__ :Union[str, Any] = model snake_case__ :List[str] = 2 snake_case__ :Optional[int] = nn.Linear(self.model.config.hidden_size ,self.num_labels ) def lowerCAmelCase_ ( self ) -> Dict: pass def lowercase_ ( __snake_case : str , __snake_case : str , __snake_case : str ) -> List[Any]: '''simple docstring''' snake_case__ :Tuple = LongformerModel.from_pretrained(__snake_case ) snake_case__ :Optional[Any] = LightningModel(__snake_case ) snake_case__ :List[str] = torch.load(__snake_case , map_location=torch.device("cpu" ) ) lightning_model.load_state_dict(ckpt["state_dict"] ) # init longformer question answering model snake_case__ :Tuple = LongformerForQuestionAnswering.from_pretrained(__snake_case ) # transfer weights longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() ) longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() ) longformer_for_qa.eval() # save model longformer_for_qa.save_pretrained(__snake_case ) print(F'Conversion successful. Model saved under {pytorch_dump_folder_path}' ) if __name__ == "__main__": __UpperCAmelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--longformer_model", default=None, type=str, required=True, help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.", ) parser.add_argument( "--longformer_question_answering_ckpt_path", default=None, type=str, required=True, help="Path the official PyTorch Lightning Checkpoint.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) __UpperCAmelCase : Union[str, Any] = parser.parse_args() convert_longformer_qa_checkpoint_to_pytorch( args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path )
57
from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
57
1
import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class _snake_case ( _A , _A , _A ): @register_to_config def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = False ,) -> int: super().__init__() snake_case__ :Union[str, Any] = nn.Embedding(UpperCamelCase ,UpperCamelCase ) snake_case__ :int = nn.Embedding(UpperCamelCase ,UpperCamelCase ) snake_case__ :Any = False snake_case__ :List[Any] = nn.Dropout(p=UpperCamelCase ) snake_case__ :Tuple = TaConfig( vocab_size=UpperCamelCase ,d_model=UpperCamelCase ,num_heads=UpperCamelCase ,d_kv=UpperCamelCase ,d_ff=UpperCamelCase ,dropout_rate=UpperCamelCase ,feed_forward_proj=UpperCamelCase ,is_decoder=UpperCamelCase ,is_encoder_decoder=UpperCamelCase ,) snake_case__ :List[str] = nn.ModuleList() for lyr_num in range(UpperCamelCase ): snake_case__ :List[Any] = TaBlock(UpperCamelCase ) self.encoders.append(UpperCamelCase ) snake_case__ :Optional[Any] = TaLayerNorm(UpperCamelCase ) snake_case__ :Any = nn.Dropout(p=UpperCamelCase ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> int: snake_case__ :str = self.token_embedder(UpperCamelCase ) snake_case__ :int = encoder_input_tokens.shape[1] snake_case__ :List[Any] = torch.arange(UpperCamelCase ,device=encoder_input_tokens.device ) x += self.position_encoding(UpperCamelCase ) snake_case__ :Optional[int] = self.dropout_pre(UpperCamelCase ) # inverted the attention mask snake_case__ :Optional[Any] = encoder_input_tokens.size() snake_case__ :Dict = self.get_extended_attention_mask(UpperCamelCase ,UpperCamelCase ) for lyr in self.encoders: snake_case__ :str = lyr(UpperCamelCase ,UpperCamelCase )[0] snake_case__ :List[Any] = self.layer_norm(UpperCamelCase ) return self.dropout_post(UpperCamelCase ), encoder_inputs_mask
57
import json import os import shutil import warnings from argparse import ArgumentParser, Namespace from pathlib import Path from typing import List from ..utils import logging from . import BaseTransformersCLICommand try: from cookiecutter.main import cookiecutter __UpperCAmelCase : Dict = True except ImportError: __UpperCAmelCase : List[Any] = False __UpperCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name def lowercase_ ( __snake_case : Namespace ) -> Dict: '''simple docstring''' return AddNewModelCommand(args.testing , args.testing_file , path=args.path ) class _snake_case ( _A ): @staticmethod def lowerCAmelCase_ ( UpperCamelCase ) -> Any: snake_case__ :Dict = parser.add_parser("add-new-model" ) add_new_model_parser.add_argument("--testing" ,action="store_true" ,help="If in testing mode." ) add_new_model_parser.add_argument("--testing_file" ,type=UpperCamelCase ,help="Configuration file on which to run." ) add_new_model_parser.add_argument( "--path" ,type=UpperCamelCase ,help="Path to cookiecutter. Should only be used for testing purposes." ) add_new_model_parser.set_defaults(func=UpperCamelCase ) def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ,*UpperCamelCase ) -> Any: snake_case__ :Union[str, Any] = testing snake_case__ :Union[str, Any] = testing_file snake_case__ :List[str] = path def lowerCAmelCase_ ( self ) -> List[Any]: warnings.warn( "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. " "It is not actively maintained anymore, so might give a result that won't pass all tests and quality " "checks, you should use `transformers-cli add-new-model-like` instead." ) if not _has_cookiecutter: raise ImportError( "Model creation dependencies are required to use the `add_new_model` command. Install them by running " "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" ) # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory snake_case__ :Tuple = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]] if len(UpperCamelCase ) > 0: raise ValueError( "Several directories starting with `cookiecutter-template-` in current working directory. " "Please clean your directory by removing all folders starting with `cookiecutter-template-` or " "change your working directory." ) snake_case__ :str = ( Path(UpperCamelCase ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent ) snake_case__ :Tuple = path_to_transformer_root / "templates" / "adding_a_new_model" # Execute cookiecutter if not self._testing: cookiecutter(str(UpperCamelCase ) ) else: with open(self._testing_file ,"r" ) as configuration_file: snake_case__ :str = json.load(UpperCamelCase ) cookiecutter( str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=UpperCamelCase ,extra_context=UpperCamelCase ,) snake_case__ :List[Any] = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0] # Retrieve configuration with open(directory + "/configuration.json" ,"r" ) as configuration_file: snake_case__ :Dict = json.load(UpperCamelCase ) snake_case__ :Optional[Any] = configuration["lowercase_modelname"] snake_case__ :List[Any] = configuration["generate_tensorflow_pytorch_and_flax"] os.remove(f'{directory}/configuration.json' ) snake_case__ :Any = "PyTorch" in generate_tensorflow_pytorch_and_flax snake_case__ :Any = "TensorFlow" in generate_tensorflow_pytorch_and_flax snake_case__ :Any = "Flax" in generate_tensorflow_pytorch_and_flax snake_case__ :Dict = f'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}' os.makedirs(UpperCamelCase ,exist_ok=UpperCamelCase ) os.makedirs(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=UpperCamelCase ) # Tests require submodules as they have parent imports with open(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,"w" ): pass shutil.move( f'{directory}/__init__.py' ,f'{model_dir}/__init__.py' ,) shutil.move( f'{directory}/configuration_{lowercase_model_name}.py' ,f'{model_dir}/configuration_{lowercase_model_name}.py' ,) def remove_copy_lines(UpperCamelCase ): with open(UpperCamelCase ,"r" ) as f: snake_case__ :List[str] = f.readlines() with open(UpperCamelCase ,"w" ) as f: for line in lines: if "# Copied from transformers." not in line: f.write(UpperCamelCase ) if output_pytorch: if not self._testing: remove_copy_lines(f'{directory}/modeling_{lowercase_model_name}.py' ) shutil.move( f'{directory}/modeling_{lowercase_model_name}.py' ,f'{model_dir}/modeling_{lowercase_model_name}.py' ,) shutil.move( f'{directory}/test_modeling_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,) else: os.remove(f'{directory}/modeling_{lowercase_model_name}.py' ) os.remove(f'{directory}/test_modeling_{lowercase_model_name}.py' ) if output_tensorflow: if not self._testing: remove_copy_lines(f'{directory}/modeling_tf_{lowercase_model_name}.py' ) shutil.move( f'{directory}/modeling_tf_{lowercase_model_name}.py' ,f'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,) shutil.move( f'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,) else: os.remove(f'{directory}/modeling_tf_{lowercase_model_name}.py' ) os.remove(f'{directory}/test_modeling_tf_{lowercase_model_name}.py' ) if output_flax: if not self._testing: remove_copy_lines(f'{directory}/modeling_flax_{lowercase_model_name}.py' ) shutil.move( f'{directory}/modeling_flax_{lowercase_model_name}.py' ,f'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,) shutil.move( f'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,) else: os.remove(f'{directory}/modeling_flax_{lowercase_model_name}.py' ) os.remove(f'{directory}/test_modeling_flax_{lowercase_model_name}.py' ) shutil.move( f'{directory}/{lowercase_model_name}.md' ,f'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,) shutil.move( f'{directory}/tokenization_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}.py' ,) shutil.move( f'{directory}/tokenization_fast_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,) from os import fdopen, remove from shutil import copymode, move from tempfile import mkstemp def replace(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ): # Create temp file snake_case__ , snake_case__ :Optional[Any] = mkstemp() snake_case__ :Optional[Any] = False with fdopen(UpperCamelCase ,"w" ) as new_file: with open(UpperCamelCase ) as old_file: for line in old_file: new_file.write(UpperCamelCase ) if line_to_copy_below in line: snake_case__ :Optional[Any] = True for line_to_copy in lines_to_copy: new_file.write(UpperCamelCase ) if not line_found: raise ValueError(f'Line {line_to_copy_below} was not found in file.' ) # Copy the file permissions from the old file to the new file copymode(UpperCamelCase ,UpperCamelCase ) # Remove original file remove(UpperCamelCase ) # Move new file move(UpperCamelCase ,UpperCamelCase ) def skip_units(UpperCamelCase ): return ( ("generating PyTorch" in line and not output_pytorch) or ("generating TensorFlow" in line and not output_tensorflow) or ("generating Flax" in line and not output_flax) ) def replace_in_files(UpperCamelCase ): with open(UpperCamelCase ) as datafile: snake_case__ :int = [] snake_case__ :Optional[int] = False snake_case__ :List[str] = False for line in datafile: if "# To replace in: " in line and "##" not in line: snake_case__ :Optional[Any] = line.split("\"" )[1] snake_case__ :Tuple = skip_units(UpperCamelCase ) elif "# Below: " in line and "##" not in line: snake_case__ :Optional[Any] = line.split("\"" )[1] snake_case__ :List[str] = skip_units(UpperCamelCase ) elif "# End." in line and "##" not in line: if not skip_file and not skip_snippet: replace(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) snake_case__ :Tuple = [] elif "# Replace with" in line and "##" not in line: snake_case__ :Optional[Any] = [] elif "##" not in line: lines_to_copy.append(UpperCamelCase ) remove(UpperCamelCase ) replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py' ) os.rmdir(UpperCamelCase )
57
1
from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class _snake_case : _A = PegasusConfig _A = {} _A = 'gelu' def __init__( self ,UpperCamelCase ,UpperCamelCase=13 ,UpperCamelCase=7 ,UpperCamelCase=True ,UpperCamelCase=False ,UpperCamelCase=99 ,UpperCamelCase=32 ,UpperCamelCase=2 ,UpperCamelCase=4 ,UpperCamelCase=37 ,UpperCamelCase=0.1 ,UpperCamelCase=0.1 ,UpperCamelCase=40 ,UpperCamelCase=2 ,UpperCamelCase=1 ,UpperCamelCase=0 ,) -> int: snake_case__ :Tuple = parent snake_case__ :Any = batch_size snake_case__ :Optional[Any] = seq_length snake_case__ :Tuple = is_training snake_case__ :Union[str, Any] = use_labels snake_case__ :int = vocab_size snake_case__ :Dict = hidden_size snake_case__ :List[Any] = num_hidden_layers snake_case__ :Any = num_attention_heads snake_case__ :str = intermediate_size snake_case__ :int = hidden_dropout_prob snake_case__ :Optional[Any] = attention_probs_dropout_prob snake_case__ :List[str] = max_position_embeddings snake_case__ :Dict = eos_token_id snake_case__ :Optional[int] = pad_token_id snake_case__ :Any = bos_token_id def lowerCAmelCase_ ( self ) -> Optional[Any]: snake_case__ :int = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size ) snake_case__ :Any = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 ) snake_case__ :List[Any] = tf.concat([input_ids, eos_tensor] ,axis=1 ) snake_case__ :Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) snake_case__ :str = self.config_cls( vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,) snake_case__ :Optional[Any] = prepare_pegasus_inputs_dict(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) return config, inputs_dict def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> Optional[int]: snake_case__ :List[str] = TFPegasusModel(config=UpperCamelCase ).get_decoder() snake_case__ :Optional[Any] = inputs_dict["input_ids"] snake_case__ :str = input_ids[:1, :] snake_case__ :Tuple = inputs_dict["attention_mask"][:1, :] snake_case__ :Any = inputs_dict["head_mask"] snake_case__ :Any = 1 # first forward pass snake_case__ :Any = model(UpperCamelCase ,attention_mask=UpperCamelCase ,head_mask=UpperCamelCase ,use_cache=UpperCamelCase ) snake_case__ , snake_case__ :Optional[int] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids snake_case__ :Optional[int] = ids_tensor((self.batch_size, 3) ,config.vocab_size ) snake_case__ :Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta ) # append to next input_ids and snake_case__ :Optional[Any] = tf.concat([input_ids, next_tokens] ,axis=-1 ) snake_case__ :List[Any] = tf.concat([attention_mask, next_attn_mask] ,axis=-1 ) snake_case__ :Optional[int] = model(UpperCamelCase ,attention_mask=UpperCamelCase )[0] snake_case__ :str = model(UpperCamelCase ,attention_mask=UpperCamelCase ,past_key_values=UpperCamelCase )[0] self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] ) # select random slice snake_case__ :Optional[Any] = int(ids_tensor((1,) ,output_from_past.shape[-1] ) ) snake_case__ :str = output_from_no_past[:, -3:, random_slice_idx] snake_case__ :str = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(UpperCamelCase ,UpperCamelCase ,rtol=1E-3 ) def lowercase_ ( __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : Dict=None , __snake_case : Union[str, Any]=None , __snake_case : Union[str, Any]=None , __snake_case : Optional[int]=None , __snake_case : Optional[int]=None , ) -> Tuple: '''simple docstring''' if attention_mask is None: snake_case__ :List[Any] = tf.cast(tf.math.not_equal(__snake_case , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: snake_case__ :Dict = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: snake_case__ :Optional[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: snake_case__ :str = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: snake_case__ :Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _snake_case ( _A , _A , unittest.TestCase ): _A = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () _A = (TFPegasusForConditionalGeneration,) if is_tf_available() else () _A = ( { 'conversational': TFPegasusForConditionalGeneration, 'feature-extraction': TFPegasusModel, 'summarization': TFPegasusForConditionalGeneration, 'text2text-generation': TFPegasusForConditionalGeneration, 'translation': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) _A = True _A = False _A = False def lowerCAmelCase_ ( self ) -> Optional[Any]: snake_case__ :Dict = TFPegasusModelTester(self ) snake_case__ :Any = ConfigTester(self ,config_class=UpperCamelCase ) def lowerCAmelCase_ ( self ) -> Any: self.config_tester.run_common_tests() def lowerCAmelCase_ ( self ) -> Optional[Any]: snake_case__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase ) @require_sentencepiece @require_tokenizers @require_tf class _snake_case ( unittest.TestCase ): _A = [ ' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.', ' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ', ] _A = [ 'California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to' ' reduce the risk of wildfires.', 'N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.', ] # differs slightly from pytorch, likely due to numerical differences in linear layers _A = 'google/pegasus-xsum' @cached_property def lowerCAmelCase_ ( self ) -> Tuple: return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def lowerCAmelCase_ ( self ) -> Dict: snake_case__ :Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def lowerCAmelCase_ ( self ,**UpperCamelCase ) -> Any: snake_case__ :Optional[Any] = self.translate_src_text(**UpperCamelCase ) assert self.expected_text == generated_words def lowerCAmelCase_ ( self ,**UpperCamelCase ) -> str: snake_case__ :Dict = self.tokenizer(self.src_text ,**UpperCamelCase ,padding=UpperCamelCase ,return_tensors="tf" ) snake_case__ :str = self.model.generate( model_inputs.input_ids ,attention_mask=model_inputs.attention_mask ,num_beams=2 ,use_cache=UpperCamelCase ,) snake_case__ :int = self.tokenizer.batch_decode(generated_ids.numpy() ,skip_special_tokens=UpperCamelCase ) return generated_words @slow def lowerCAmelCase_ ( self ) -> Any: self._assert_generated_batch_equal_expected()
57
from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_herbert import HerbertTokenizer __UpperCAmelCase : str = logging.get_logger(__name__) __UpperCAmelCase : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} __UpperCAmelCase : List[Any] = { "vocab_file": { "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json" }, "merges_file": { "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt" }, } __UpperCAmelCase : str = {"allegro/herbert-base-cased": 5_1_4} __UpperCAmelCase : List[str] = {} class _snake_case ( _A ): _A = VOCAB_FILES_NAMES _A = PRETRAINED_VOCAB_FILES_MAP _A = PRETRAINED_INIT_CONFIGURATION _A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A = HerbertTokenizer def __init__( self ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase="<s>" ,UpperCamelCase="<unk>" ,UpperCamelCase="<pad>" ,UpperCamelCase="<mask>" ,UpperCamelCase="</s>" ,**UpperCamelCase ,) -> Dict: super().__init__( UpperCamelCase ,UpperCamelCase ,tokenizer_file=UpperCamelCase ,cls_token=UpperCamelCase ,unk_token=UpperCamelCase ,pad_token=UpperCamelCase ,mask_token=UpperCamelCase ,sep_token=UpperCamelCase ,**UpperCamelCase ,) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> List[int]: snake_case__ :Optional[int] = [self.cls_token_id] snake_case__ :Any = [self.sep_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase ,token_ids_a=UpperCamelCase ,already_has_special_tokens=UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase )) + [1] return [1] + ([0] * len(UpperCamelCase )) + [1] + ([0] * len(UpperCamelCase )) + [1] def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> List[int]: snake_case__ :Any = [self.sep_token_id] snake_case__ :Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> Tuple[str]: snake_case__ :List[str] = self._tokenizer.model.save(UpperCamelCase ,name=UpperCamelCase ) return tuple(UpperCamelCase )
57
1
from __future__ import annotations from collections.abc import Callable from typing import Any, Generic, TypeVar __UpperCAmelCase : List[str] = TypeVar("T") class _snake_case ( Generic[T] ): def __init__( self ,UpperCamelCase ,UpperCamelCase ) -> None: snake_case__ :Any | T = None snake_case__ :int = len(UpperCamelCase ) snake_case__ :list[T] = [any_type for _ in range(self.N )] + arr snake_case__ :Optional[Any] = fnc self.build() def lowerCAmelCase_ ( self ) -> None: for p in range(self.N - 1 ,0 ,-1 ): snake_case__ :Optional[Any] = self.fn(self.st[p * 2] ,self.st[p * 2 + 1] ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> None: p += self.N snake_case__ :Optional[Any] = v while p > 1: snake_case__ :List[str] = p // 2 snake_case__ :List[Any] = self.fn(self.st[p * 2] ,self.st[p * 2 + 1] ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> T | None: # noqa: E741 snake_case__ , snake_case__ :List[str] = l + self.N, r + self.N snake_case__ :T | None = None while l <= r: if l % 2 == 1: snake_case__ :List[str] = self.st[l] if res is None else self.fn(UpperCamelCase ,self.st[l] ) if r % 2 == 0: snake_case__ :Union[str, Any] = self.st[r] if res is None else self.fn(UpperCamelCase ,self.st[r] ) snake_case__ , snake_case__ :List[str] = (l + 1) // 2, (r - 1) // 2 return res if __name__ == "__main__": from functools import reduce __UpperCAmelCase : Optional[Any] = [1, 1_0, -2, 9, -3, 8, 4, -7, 5, 6, 1_1, -1_2] __UpperCAmelCase : Dict = { 0: 7, 1: 2, 2: 6, 3: -1_4, 4: 5, 5: 4, 6: 7, 7: -1_0, 8: 9, 9: 1_0, 1_0: 1_2, 1_1: 1, } __UpperCAmelCase : Optional[int] = SegmentTree(test_array, min) __UpperCAmelCase : Dict = SegmentTree(test_array, max) __UpperCAmelCase : Dict = SegmentTree(test_array, lambda a, b: a + b) def lowercase_ ( ) -> None: '''simple docstring''' for i in range(len(__snake_case ) ): for j in range(__snake_case , len(__snake_case ) ): snake_case__ :Optional[Any] = reduce(__snake_case , test_array[i : j + 1] ) snake_case__ :str = reduce(__snake_case , test_array[i : j + 1] ) snake_case__ :List[str] = reduce(lambda __snake_case , __snake_case : a + b , test_array[i : j + 1] ) assert min_range == min_segment_tree.query(__snake_case , __snake_case ) assert max_range == max_segment_tree.query(__snake_case , __snake_case ) assert sum_range == sum_segment_tree.query(__snake_case , __snake_case ) test_all_segments() for index, value in test_updates.items(): __UpperCAmelCase : Optional[Any] = value min_segment_tree.update(index, value) max_segment_tree.update(index, value) sum_segment_tree.update(index, value) test_all_segments()
57
def lowercase_ ( __snake_case : int ) -> bool: '''simple docstring''' if p < 2: raise ValueError("p should not be less than 2!" ) elif p == 2: return True snake_case__ :List[str] = 4 snake_case__ :Optional[int] = (1 << p) - 1 for _ in range(p - 2 ): snake_case__ :List[Any] = ((s * s) - 2) % m return s == 0 if __name__ == "__main__": print(lucas_lehmer_test(7)) print(lucas_lehmer_test(1_1))
57
1
import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _snake_case : @staticmethod def lowerCAmelCase_ ( *UpperCamelCase ,**UpperCamelCase ) -> List[str]: pass @is_pipeline_test @require_torch @require_vision class _snake_case ( unittest.TestCase ): _A = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Optional[Any]: snake_case__ :str = pipeline("visual-question-answering" ,model="hf-internal-testing/tiny-vilt-random-vqa" ) snake_case__ :str = [ { "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "question": "How many cats are there?", }, { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "question": "How many cats are there?", }, ] return vqa_pipeline, examples def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> List[str]: snake_case__ :Dict = vqa_pipeline(UpperCamelCase ,top_k=1 ) self.assertEqual( UpperCamelCase ,[ [{"score": ANY(UpperCamelCase ), "answer": ANY(UpperCamelCase )}], [{"score": ANY(UpperCamelCase ), "answer": ANY(UpperCamelCase )}], ] ,) @require_torch def lowerCAmelCase_ ( self ) -> int: snake_case__ :List[Any] = pipeline("visual-question-answering" ,model="hf-internal-testing/tiny-vilt-random-vqa" ) snake_case__ :Optional[int] = "./tests/fixtures/tests_samples/COCO/000000039769.png" snake_case__ :Optional[int] = "How many cats are there?" snake_case__ :str = vqa_pipeline(image=UpperCamelCase ,question="How many cats are there?" ,top_k=2 ) self.assertEqual( UpperCamelCase ,[{"score": ANY(UpperCamelCase ), "answer": ANY(UpperCamelCase )}, {"score": ANY(UpperCamelCase ), "answer": ANY(UpperCamelCase )}] ) snake_case__ :Optional[Any] = vqa_pipeline({"image": image, "question": question} ,top_k=2 ) self.assertEqual( UpperCamelCase ,[{"score": ANY(UpperCamelCase ), "answer": ANY(UpperCamelCase )}, {"score": ANY(UpperCamelCase ), "answer": ANY(UpperCamelCase )}] ) @slow @require_torch def lowerCAmelCase_ ( self ) -> List[Any]: snake_case__ :List[str] = pipeline("visual-question-answering" ,model="dandelin/vilt-b32-finetuned-vqa" ) snake_case__ :Tuple = "./tests/fixtures/tests_samples/COCO/000000039769.png" snake_case__ :Union[str, Any] = "How many cats are there?" snake_case__ :List[Any] = vqa_pipeline(image=UpperCamelCase ,question=UpperCamelCase ,top_k=2 ) self.assertEqual( nested_simplify(UpperCamelCase ,decimals=4 ) ,[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] ) snake_case__ :Optional[Any] = vqa_pipeline({"image": image, "question": question} ,top_k=2 ) self.assertEqual( nested_simplify(UpperCamelCase ,decimals=4 ) ,[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] ) snake_case__ :Tuple = vqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] ,top_k=2 ) self.assertEqual( nested_simplify(UpperCamelCase ,decimals=4 ) ,[[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2 ,) @require_tf @unittest.skip("Visual question answering not implemented in TF" ) def lowerCAmelCase_ ( self ) -> List[Any]: pass
57
from typing import Any def lowercase_ ( __snake_case : list , __snake_case : list , __snake_case : dict , __snake_case : dict , __snake_case : dict , ) -> list: '''simple docstring''' _validation( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) # Creates data structures and fill initial step snake_case__ :dict = {} snake_case__ :dict = {} for state in states_space: snake_case__ :List[Any] = observations_space[0] snake_case__ :str = ( initial_probabilities[state] * emission_probabilities[state][observation] ) snake_case__ :str = None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 , len(__snake_case ) ): snake_case__ :Any = observations_space[o] snake_case__ :Tuple = observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function snake_case__ :Tuple = "" snake_case__ :Union[str, Any] = -1 for k_state in states_space: snake_case__ :int = ( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: snake_case__ :str = probability snake_case__ :Tuple = k_state # Update probabilities and pointers dicts snake_case__ :List[str] = ( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) snake_case__ :List[str] = arg_max # The final observation snake_case__ :str = observations_space[len(__snake_case ) - 1] # argmax for given final observation snake_case__ :Optional[int] = "" snake_case__ :List[str] = -1 for k_state in states_space: snake_case__ :List[str] = probabilities[(k_state, final_observation)] if probability > max_probability: snake_case__ :List[str] = probability snake_case__ :int = k_state snake_case__ :Any = arg_max # Process pointers backwards snake_case__ :int = last_state snake_case__ :List[str] = [] for o in range(len(__snake_case ) - 1 , -1 , -1 ): result.append(__snake_case ) snake_case__ :List[str] = pointers[previous, observations_space[o]] result.reverse() return result def lowercase_ ( __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , ) -> None: '''simple docstring''' _validate_not_empty( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) _validate_lists(__snake_case , __snake_case ) _validate_dicts( __snake_case , __snake_case , __snake_case ) def lowercase_ ( __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , ) -> None: '''simple docstring''' if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError("There's an empty parameter" ) def lowercase_ ( __snake_case : Any , __snake_case : Any ) -> None: '''simple docstring''' _validate_list(__snake_case , "observations_space" ) _validate_list(__snake_case , "states_space" ) def lowercase_ ( __snake_case : Any , __snake_case : str ) -> None: '''simple docstring''' if not isinstance(_object , __snake_case ): snake_case__ :Optional[int] = F'{var_name} must be a list' raise ValueError(__snake_case ) else: for x in _object: if not isinstance(__snake_case , __snake_case ): snake_case__ :Any = F'{var_name} must be a list of strings' raise ValueError(__snake_case ) def lowercase_ ( __snake_case : Any , __snake_case : Any , __snake_case : Any , ) -> None: '''simple docstring''' _validate_dict(__snake_case , "initial_probabilities" , __snake_case ) _validate_nested_dict(__snake_case , "transition_probabilities" ) _validate_nested_dict(__snake_case , "emission_probabilities" ) def lowercase_ ( __snake_case : Any , __snake_case : str ) -> None: '''simple docstring''' _validate_dict(_object , __snake_case , __snake_case ) for x in _object.values(): _validate_dict(__snake_case , __snake_case , __snake_case , __snake_case ) def lowercase_ ( __snake_case : Any , __snake_case : str , __snake_case : type , __snake_case : bool = False ) -> None: '''simple docstring''' if not isinstance(_object , __snake_case ): snake_case__ :str = F'{var_name} must be a dict' raise ValueError(__snake_case ) if not all(isinstance(__snake_case , __snake_case ) for x in _object ): snake_case__ :List[Any] = F'{var_name} all keys must be strings' raise ValueError(__snake_case ) if not all(isinstance(__snake_case , __snake_case ) for x in _object.values() ): snake_case__ :Optional[int] = "nested dictionary " if nested else "" snake_case__ :int = F'{var_name} {nested_text}all values must be {value_type.__name__}' raise ValueError(__snake_case ) if __name__ == "__main__": from doctest import testmod testmod()
57
1
# # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def lowercase_ ( *__snake_case : Tuple ) -> str: '''simple docstring''' with open(__snake_case , "r" ) as fh: fcntl.flock(__snake_case , fcntl.LOCK_EX ) try: print(*__snake_case ) finally: fcntl.flock(__snake_case , fcntl.LOCK_UN ) __UpperCAmelCase : Optional[int] = int(os.environ["LOCAL_RANK"]) torch.cuda.set_device(local_rank) __UpperCAmelCase : Union[str, Any] = torch.device("cuda", local_rank) __UpperCAmelCase : str = socket.gethostname() __UpperCAmelCase : Optional[int] = F'''[{hostname}-{local_rank}]''' try: # test distributed dist.init_process_group("nccl") dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank __UpperCAmelCase : str = dist.get_rank() __UpperCAmelCase : Optional[Any] = dist.get_world_size() printflock(F'''{gpu} is OK (global rank: {rank}/{world_size})''') dist.barrier() if rank == 0: printflock(F'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''') except Exception: printflock(F'''{gpu} is broken''') raise
57
def lowercase_ ( __snake_case : str ) -> list: '''simple docstring''' return [ txt[:a] + txt[a].upper() + txt[a + 1 :] for a in range(len(__snake_case ) ) if txt[a].isalpha() ] if __name__ == "__main__": __import__("doctest").testmod()
57
1
import math from collections.abc import Iterator from itertools import takewhile def lowercase_ ( __snake_case : int ) -> bool: '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__snake_case ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowercase_ ( ) -> Iterator[int]: '''simple docstring''' snake_case__ :str = 2 while True: if is_prime(__snake_case ): yield num num += 1 def lowercase_ ( __snake_case : int = 2_00_00_00 ) -> int: '''simple docstring''' return sum(takewhile(lambda __snake_case : x < n , prime_generator() ) ) if __name__ == "__main__": print(F'''{solution() = }''')
57
def lowercase_ ( __snake_case : int = 10_00 ) -> int: '''simple docstring''' snake_case__ :int = 3 snake_case__ :int = 0 while a < n: if a % 3 == 0 or a % 5 == 0: result += a elif a % 15 == 0: result -= a a += 1 return result if __name__ == "__main__": print(F'''{solution() = }''')
57
1
import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowercase_ ( __snake_case : str , __snake_case : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : str ) -> List[Any]: '''simple docstring''' with open(__snake_case ) as metadata_file: snake_case__ :str = json.load(__snake_case ) snake_case__ :Optional[Any] = LukeConfig(use_entity_aware_attention=__snake_case , **metadata["model_config"] ) # Load in the weights from the checkpoint_path snake_case__ :Any = torch.load(__snake_case , map_location="cpu" )["module"] # Load the entity vocab file snake_case__ :Optional[int] = load_original_entity_vocab(__snake_case ) # add an entry for [MASK2] snake_case__ :Any = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 snake_case__ :Dict = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] ) # Add special tokens to the token vocabulary for downstream tasks snake_case__ :List[Any] = AddedToken("<ent>" , lstrip=__snake_case , rstrip=__snake_case ) snake_case__ :List[Any] = AddedToken("<ent2>" , lstrip=__snake_case , rstrip=__snake_case ) tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(F'Saving tokenizer to {pytorch_dump_folder_path}' ) tokenizer.save_pretrained(__snake_case ) with open(os.path.join(__snake_case , "tokenizer_config.json" ) , "r" ) as f: snake_case__ :str = json.load(__snake_case ) snake_case__ :Optional[Any] = "MLukeTokenizer" with open(os.path.join(__snake_case , "tokenizer_config.json" ) , "w" ) as f: json.dump(__snake_case , __snake_case ) with open(os.path.join(__snake_case , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f: json.dump(__snake_case , __snake_case ) snake_case__ :List[Any] = MLukeTokenizer.from_pretrained(__snake_case ) # Initialize the embeddings of the special tokens snake_case__ :List[str] = tokenizer.convert_tokens_to_ids(["@"] )[0] snake_case__ :Optional[Any] = tokenizer.convert_tokens_to_ids(["#"] )[0] snake_case__ :Tuple = state_dict["embeddings.word_embeddings.weight"] snake_case__ :Dict = word_emb[ent_init_index].unsqueeze(0 ) snake_case__ :Union[str, Any] = word_emb[enta_init_index].unsqueeze(0 ) snake_case__ :Any = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: snake_case__ :Dict = state_dict[bias_name] snake_case__ :Optional[int] = decoder_bias[ent_init_index].unsqueeze(0 ) snake_case__ :str = decoder_bias[enta_init_index].unsqueeze(0 ) snake_case__ :Dict = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: snake_case__ :int = F'encoder.layer.{layer_index}.attention.self.' snake_case__ :Dict = state_dict[prefix + matrix_name] snake_case__ :Optional[Any] = state_dict[prefix + matrix_name] snake_case__ :Tuple = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks snake_case__ :List[str] = state_dict["entity_embeddings.entity_embeddings.weight"] snake_case__ :Dict = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 ) snake_case__ :List[Any] = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' snake_case__ :Union[str, Any] = state_dict["entity_predictions.bias"] snake_case__ :Tuple = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 ) snake_case__ :Union[str, Any] = torch.cat([entity_prediction_bias, entity_mask_bias] ) snake_case__ :List[Any] = LukeForMaskedLM(config=__snake_case ).eval() state_dict.pop("entity_predictions.decoder.weight" ) state_dict.pop("lm_head.decoder.weight" ) state_dict.pop("lm_head.decoder.bias" ) snake_case__ :str = OrderedDict() for key, value in state_dict.items(): if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )): snake_case__ :Tuple = state_dict[key] else: snake_case__ :Optional[int] = state_dict[key] snake_case__ , snake_case__ :str = model.load_state_dict(__snake_case , strict=__snake_case ) if set(__snake_case ) != {"luke.embeddings.position_ids"}: raise ValueError(F'Unexpected unexpected_keys: {unexpected_keys}' ) if set(__snake_case ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(F'Unexpected missing_keys: {missing_keys}' ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs snake_case__ :int = MLukeTokenizer.from_pretrained(__snake_case , task="entity_classification" ) snake_case__ :Any = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)." snake_case__ :str = (0, 9) snake_case__ :Optional[Any] = tokenizer(__snake_case , entity_spans=[span] , return_tensors="pt" ) snake_case__ :List[Any] = model(**__snake_case ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base snake_case__ :int = torch.Size((1, 33, 7_68) ) snake_case__ :List[Any] = torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __snake_case , atol=1e-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base snake_case__ :int = torch.Size((1, 1, 7_68) ) snake_case__ :Optional[int] = torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is' F' {expected_shape}' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __snake_case , atol=1e-4 ): raise ValueError # Verify masked word/entity prediction snake_case__ :Union[str, Any] = MLukeTokenizer.from_pretrained(__snake_case ) snake_case__ :Optional[int] = "Tokyo is the capital of <mask>." snake_case__ :List[Any] = (24, 30) snake_case__ :Any = tokenizer(__snake_case , entity_spans=[span] , return_tensors="pt" ) snake_case__ :Optional[Any] = model(**__snake_case ) snake_case__ :int = encoding["input_ids"][0].tolist() snake_case__ :Optional[int] = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) ) snake_case__ :Tuple = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(__snake_case ) snake_case__ :Optional[Any] = outputs.entity_logits[0][0].argmax().item() snake_case__ :List[Any] = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print("Saving PyTorch model to {}".format(__snake_case ) ) model.save_pretrained(__snake_case ) def lowercase_ ( __snake_case : Any ) -> Optional[Any]: '''simple docstring''' snake_case__ :List[Any] = ["[MASK]", "[PAD]", "[UNK]"] snake_case__ :List[Any] = [json.loads(__snake_case ) for line in open(__snake_case )] snake_case__ :Tuple = {} for entry in data: snake_case__ :str = entry["id"] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: snake_case__ :List[str] = entity_id break snake_case__ :Optional[Any] = F'{language}:{entity_name}' snake_case__ :Dict = entity_id return new_mapping if __name__ == "__main__": __UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.") parser.add_argument( "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration." ) parser.add_argument( "--entity_vocab_path", default=None, type=str, help="Path to an entity_vocab.tsv file, containing the entity vocabulary.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model." ) parser.add_argument( "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted." ) __UpperCAmelCase : Tuple = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
57
import os import sys import unittest __UpperCAmelCase : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __UpperCAmelCase : Tuple = os.path.join(git_repo_path, "src", "diffusers") class _snake_case ( unittest.TestCase ): def lowerCAmelCase_ ( self ) -> Union[str, Any]: snake_case__ :Tuple = find_backend(" if not is_torch_available():" ) self.assertEqual(UpperCamelCase ,"torch" ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") snake_case__ :Tuple = find_backend(" if not (is_torch_available() and is_transformers_available()):" ) self.assertEqual(UpperCamelCase ,"torch_and_transformers" ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") snake_case__ :str = find_backend( " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" ) self.assertEqual(UpperCamelCase ,"torch_and_transformers_and_onnx" ) def lowerCAmelCase_ ( self ) -> str: snake_case__ :int = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch" ,UpperCamelCase ) self.assertIn("torch_and_transformers" ,UpperCamelCase ) self.assertIn("flax_and_transformers" ,UpperCamelCase ) self.assertIn("torch_and_transformers_and_onnx" ,UpperCamelCase ) # Likewise, we can't assert on the exact content of a key self.assertIn("UNet2DModel" ,objects["torch"] ) self.assertIn("FlaxUNet2DConditionModel" ,objects["flax"] ) self.assertIn("StableDiffusionPipeline" ,objects["torch_and_transformers"] ) self.assertIn("FlaxStableDiffusionPipeline" ,objects["flax_and_transformers"] ) self.assertIn("LMSDiscreteScheduler" ,objects["torch_and_scipy"] ) self.assertIn("OnnxStableDiffusionPipeline" ,objects["torch_and_transformers_and_onnx"] ) def lowerCAmelCase_ ( self ) -> Any: snake_case__ :Union[str, Any] = create_dummy_object("CONSTANT" ,"'torch'" ) self.assertEqual(UpperCamelCase ,"\nCONSTANT = None\n" ) snake_case__ :Optional[Any] = create_dummy_object("function" ,"'torch'" ) self.assertEqual( UpperCamelCase ,"\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" ) snake_case__ :str = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n" snake_case__ :List[str] = create_dummy_object("FakeClass" ,"'torch'" ) self.assertEqual(UpperCamelCase ,UpperCamelCase ) def lowerCAmelCase_ ( self ) -> List[Any]: snake_case__ :Tuple = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n" snake_case__ :int = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} ) self.assertEqual(dummy_files["torch"] ,UpperCamelCase )
57
1
from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import BaseOutput, is_torch_available, is_transformers_available @dataclass class _snake_case ( _A ): _A = 42 _A = 42 if is_transformers_available() and is_torch_available(): from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
57
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available __UpperCAmelCase : Tuple = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : List[Any] = ["BartphoTokenizer"] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys __UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
57
1
import math class _snake_case : def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> int: snake_case__ :List[Any] = 0.0 snake_case__ :Tuple = 0.0 for i in range(len(UpperCamelCase ) ): da += math.pow((sample[i] - weights[0][i]) ,2 ) da += math.pow((sample[i] - weights[1][i]) ,2 ) return 0 if da > da else 1 return 0 def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> list[list[int | float]]: for i in range(len(UpperCamelCase ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def lowercase_ ( ) -> None: '''simple docstring''' snake_case__ :List[Any] = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) snake_case__ :Tuple = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training snake_case__ :Optional[int] = SelfOrganizingMap() snake_case__ :Any = 3 snake_case__ :Dict = 0.5 for _ in range(__snake_case ): for j in range(len(__snake_case ) ): # training sample snake_case__ :List[Any] = training_samples[j] # Compute the winning vector snake_case__ :Tuple = self_organizing_map.get_winner(__snake_case , __snake_case ) # Update the winning vector snake_case__ :Union[str, Any] = self_organizing_map.update(__snake_case , __snake_case , __snake_case , __snake_case ) # classify test sample snake_case__ :Optional[int] = [0, 0, 0, 1] snake_case__ :int = self_organizing_map.get_winner(__snake_case , __snake_case ) # results print(F'Clusters that the test sample belongs to : {winner}' ) print(F'Weights that have been trained : {weights}' ) # running the main() function if __name__ == "__main__": main()
57
import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class _snake_case ( unittest.TestCase ): def lowerCAmelCase_ ( self ) -> List[Any]: # A mock response for an HTTP head request to emulate server down snake_case__ :Tuple = mock.Mock() snake_case__ :List[str] = 500 snake_case__ :Any = {} snake_case__ :Union[str, Any] = HTTPError snake_case__ :Tuple = {} # Download this model to make sure it's in the cache. snake_case__ :Any = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.Session.request" ,return_value=UpperCamelCase ) as mock_head: snake_case__ :Dict = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def lowerCAmelCase_ ( self ) -> Dict: # A mock response for an HTTP head request to emulate server down snake_case__ :Union[str, Any] = mock.Mock() snake_case__ :int = 500 snake_case__ :Any = {} snake_case__ :Dict = HTTPError snake_case__ :List[Any] = {} # Download this model to make sure it's in the cache. snake_case__ :Optional[int] = GPTaTokenizerFast.from_pretrained("gpt2" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.Session.request" ,return_value=UpperCamelCase ) as mock_head: snake_case__ :Any = GPTaTokenizerFast.from_pretrained("gpt2" ) # This check we did call the fake head request mock_head.assert_called() def lowerCAmelCase_ ( self ) -> int: # This test is for deprecated behavior and can be removed in v5 try: snake_case__ :Union[str, Any] = tempfile.mktemp() with open(UpperCamelCase ,"wb" ) as f: http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" ,UpperCamelCase ) snake_case__ :Tuple = AlbertTokenizer.from_pretrained(UpperCamelCase ) finally: os.remove(UpperCamelCase ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile("tokenizer.json" ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open("tokenizer.json" ,"wb" ) as f: http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" ,UpperCamelCase ) snake_case__ :Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size ,1_000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. finally: os.remove("tokenizer.json" ) def lowerCAmelCase_ ( self ) -> Union[str, Any]: # This test is for deprecated behavior and can be removed in v5 snake_case__ :Union[str, Any] = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" ) @is_staging_test class _snake_case ( unittest.TestCase ): _A = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou'] @classmethod def lowerCAmelCase_ ( cls ) -> Optional[int]: snake_case__ :List[str] = TOKEN HfFolder.save_token(UpperCamelCase ) @classmethod def lowerCAmelCase_ ( cls ) -> Union[str, Any]: try: delete_repo(token=cls._token ,repo_id="test-tokenizer" ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id="valid_org/test-tokenizer-org" ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id="test-dynamic-tokenizer" ) except HTTPError: pass def lowerCAmelCase_ ( self ) -> Optional[Any]: with tempfile.TemporaryDirectory() as tmp_dir: snake_case__ :List[str] = os.path.join(UpperCamelCase ,"vocab.txt" ) with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) snake_case__ :str = BertTokenizer(UpperCamelCase ) tokenizer.push_to_hub("test-tokenizer" ,use_auth_token=self._token ) snake_case__ :Dict = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) # Reset repo delete_repo(token=self._token ,repo_id="test-tokenizer" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase ,repo_id="test-tokenizer" ,push_to_hub=UpperCamelCase ,use_auth_token=self._token ) snake_case__ :List[str] = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) def lowerCAmelCase_ ( self ) -> Optional[int]: with tempfile.TemporaryDirectory() as tmp_dir: snake_case__ :List[Any] = os.path.join(UpperCamelCase ,"vocab.txt" ) with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) snake_case__ :Any = BertTokenizer(UpperCamelCase ) tokenizer.push_to_hub("valid_org/test-tokenizer-org" ,use_auth_token=self._token ) snake_case__ :Any = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) # Reset repo delete_repo(token=self._token ,repo_id="valid_org/test-tokenizer-org" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( UpperCamelCase ,repo_id="valid_org/test-tokenizer-org" ,push_to_hub=UpperCamelCase ,use_auth_token=self._token ) snake_case__ :Union[str, Any] = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) @require_tokenizers def lowerCAmelCase_ ( self ) -> Any: CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: snake_case__ :str = os.path.join(UpperCamelCase ,"vocab.txt" ) with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) snake_case__ :Optional[int] = CustomTokenizer(UpperCamelCase ) # No fast custom tokenizer tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token ) snake_case__ :Union[str, Any] = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=UpperCamelCase ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: snake_case__ :int = os.path.join(UpperCamelCase ,"vocab.txt" ) with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) snake_case__ :Tuple = BertTokenizerFast.from_pretrained(UpperCamelCase ) bert_tokenizer.save_pretrained(UpperCamelCase ) snake_case__ :List[Any] = CustomTokenizerFast.from_pretrained(UpperCamelCase ) tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token ) snake_case__ :List[Any] = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=UpperCamelCase ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizerFast" ) snake_case__ :List[str] = AutoTokenizer.from_pretrained( f'{USER}/test-dynamic-tokenizer' ,use_fast=UpperCamelCase ,trust_remote_code=UpperCamelCase ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" ) class _snake_case ( unittest.TestCase ): def lowerCAmelCase_ ( self ) -> List[Any]: snake_case__ :int = Trie() trie.add("Hello 友達" ) self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} ) trie.add("Hello" ) trie.data self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} ) def lowerCAmelCase_ ( self ) -> int: snake_case__ :List[str] = Trie() self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS] This is a extra_id_100"] ) trie.add("[CLS]" ) trie.add("extra_id_1" ) trie.add("extra_id_100" ) self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS]", " This is a ", "extra_id_100"] ) def lowerCAmelCase_ ( self ) -> str: snake_case__ :Optional[Any] = Trie() trie.add("A" ) self.assertEqual(trie.split("ABC" ) ,["A", "BC"] ) self.assertEqual(trie.split("BCA" ) ,["BC", "A"] ) def lowerCAmelCase_ ( self ) -> Dict: snake_case__ :Any = Trie() trie.add("TOKEN]" ) trie.add("[SPECIAL_TOKEN]" ) self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] ) def lowerCAmelCase_ ( self ) -> Tuple: snake_case__ :List[Any] = Trie() trie.add("A" ) trie.add("P" ) trie.add("[SPECIAL_TOKEN]" ) self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] ) def lowerCAmelCase_ ( self ) -> Tuple: snake_case__ :str = Trie() trie.add("AB" ) trie.add("B" ) trie.add("C" ) self.assertEqual(trie.split("ABC" ) ,["AB", "C"] ) def lowerCAmelCase_ ( self ) -> Union[str, Any]: snake_case__ :Dict = Trie() trie.add("ABC" ) trie.add("B" ) trie.add("CD" ) self.assertEqual(trie.split("ABCD" ) ,["ABC", "D"] ) def lowerCAmelCase_ ( self ) -> int: # Even if the offsets are wrong, we necessarily output correct string # parts. snake_case__ :Optional[int] = Trie() snake_case__ :Union[str, Any] = trie.cut_text("ABC" ,[0, 0, 2, 1, 2, 3] ) self.assertEqual(UpperCamelCase ,["AB", "C"] )
57
1
def lowercase_ ( __snake_case : list ) -> list: '''simple docstring''' if len(__snake_case ) < 2: return collection def circle_sort_util(__snake_case : list , __snake_case : int , __snake_case : int ) -> bool: snake_case__ :List[Any] = False if low == high: return swapped snake_case__ :List[Any] = low snake_case__ :List[Any] = high while left < right: if collection[left] > collection[right]: snake_case__ , snake_case__ :Any = ( collection[right], collection[left], ) snake_case__ :Tuple = True left += 1 right -= 1 if left == right and collection[left] > collection[right + 1]: snake_case__ , snake_case__ :Optional[Any] = ( collection[right + 1], collection[left], ) snake_case__ :Union[str, Any] = True snake_case__ :Optional[int] = low + int((high - low) / 2 ) snake_case__ :int = circle_sort_util(__snake_case , __snake_case , __snake_case ) snake_case__ :int = circle_sort_util(__snake_case , mid + 1 , __snake_case ) return swapped or left_swap or right_swap snake_case__ :Tuple = True while is_not_sorted is True: snake_case__ :Any = circle_sort_util(__snake_case , 0 , len(__snake_case ) - 1 ) return collection if __name__ == "__main__": __UpperCAmelCase : Dict = input("Enter numbers separated by a comma:\n").strip() __UpperCAmelCase : Dict = [int(item) for item in user_input.split(",")] print(circle_sort(unsorted))
57
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __UpperCAmelCase : Optional[Any] = 1_6 __UpperCAmelCase : Optional[int] = 3_2 def lowercase_ ( __snake_case : Accelerator , __snake_case : int = 16 , __snake_case : str = "bert-base-cased" ) -> Optional[Any]: '''simple docstring''' snake_case__ :int = AutoTokenizer.from_pretrained(__snake_case ) snake_case__ :Optional[int] = load_dataset("glue" , "mrpc" ) def tokenize_function(__snake_case : Tuple ): # max_length=None => use the model max length (it's actually the default) snake_case__ :Any = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__snake_case , max_length=__snake_case ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset snake_case__ :List[Any] = datasets.map( __snake_case , batched=__snake_case , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=__snake_case ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library snake_case__ :Any = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(__snake_case : Dict ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__snake_case , padding="max_length" , max_length=1_28 , return_tensors="pt" ) return tokenizer.pad(__snake_case , padding="longest" , return_tensors="pt" ) # Instantiate dataloaders. snake_case__ :Any = DataLoader( tokenized_datasets["train"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case ) snake_case__ :Tuple = DataLoader( tokenized_datasets["validation"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case ) return train_dataloader, eval_dataloader def lowercase_ ( __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : Optional[int] ) -> Tuple: '''simple docstring''' model.eval() snake_case__ :Union[str, Any] = 0 for step, batch in enumerate(__snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): snake_case__ :List[Any] = model(**__snake_case ) snake_case__ :Any = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times snake_case__ , snake_case__ :Tuple = accelerator.gather( (predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(__snake_case ) - 1: snake_case__ :List[str] = predictions[: len(eval_dataloader.dataset ) - samples_seen] snake_case__ :Optional[int] = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=__snake_case , references=__snake_case , ) snake_case__ :int = metric.compute() return eval_metric["accuracy"] def lowercase_ ( __snake_case : Union[str, Any] , __snake_case : Optional[Any] ) -> Any: '''simple docstring''' snake_case__ :Any = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs snake_case__ :Union[str, Any] = config["lr"] snake_case__ :List[str] = int(config["num_epochs"] ) snake_case__ :Optional[Any] = int(config["seed"] ) snake_case__ :List[Any] = int(config["batch_size"] ) snake_case__ :List[Any] = args.model_name_or_path set_seed(__snake_case ) snake_case__ , snake_case__ :List[Any] = get_dataloaders(__snake_case , __snake_case , __snake_case ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) snake_case__ :List[Any] = AutoModelForSequenceClassification.from_pretrained(__snake_case , return_dict=__snake_case ) # Instantiate optimizer snake_case__ :int = ( AdamW if accelerator.state.deepspeed_plugin is None or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) snake_case__ :Tuple = optimizer_cls(params=model.parameters() , lr=__snake_case ) if accelerator.state.deepspeed_plugin is not None: snake_case__ :List[str] = accelerator.state.deepspeed_plugin.deepspeed_config[ "gradient_accumulation_steps" ] else: snake_case__ :Any = 1 snake_case__ :List[Any] = (len(__snake_case ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): snake_case__ :Optional[Any] = get_linear_schedule_with_warmup( optimizer=__snake_case , num_warmup_steps=0 , num_training_steps=__snake_case , ) else: snake_case__ :Any = DummyScheduler(__snake_case , total_num_steps=__snake_case , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ :int = accelerator.prepare( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) # We need to keep track of how many total steps we have iterated over snake_case__ :Dict = 0 # We also need to keep track of the stating epoch so files are named properly snake_case__ :Union[str, Any] = 0 snake_case__ :List[str] = evaluate.load("glue" , "mrpc" ) snake_case__ :Optional[Any] = num_epochs if args.partial_train_epoch is not None: snake_case__ :List[Any] = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) snake_case__ :Union[str, Any] = args.resume_from_checkpoint.split("epoch_" )[1] snake_case__ :Dict = "" for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break snake_case__ :str = int(__snake_case ) + 1 snake_case__ :List[Any] = evaluation_loop(__snake_case , __snake_case , __snake_case , __snake_case ) accelerator.print("resumed checkpoint performance:" , __snake_case ) accelerator.print("resumed checkpoint's scheduler's lr:" , lr_scheduler.get_lr()[0] ) accelerator.print("resumed optimizers's lr:" , optimizer.param_groups[0]["lr"] ) with open(os.path.join(args.output_dir , F'state_{starting_epoch-1}.json' ) , "r" ) as f: snake_case__ :Tuple = json.load(__snake_case ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model snake_case__ :Optional[int] = {} for epoch in range(__snake_case , __snake_case ): model.train() for step, batch in enumerate(__snake_case ): snake_case__ :str = model(**__snake_case ) snake_case__ :List[str] = outputs.loss snake_case__ :List[Any] = loss / gradient_accumulation_steps accelerator.backward(__snake_case ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 snake_case__ :int = F'epoch_{epoch}' snake_case__ :str = os.path.join(args.output_dir , __snake_case ) accelerator.save_state(__snake_case ) snake_case__ :Union[str, Any] = evaluation_loop(__snake_case , __snake_case , __snake_case , __snake_case ) snake_case__ :List[str] = accuracy snake_case__ :List[str] = lr_scheduler.get_lr()[0] snake_case__ :List[Any] = optimizer.param_groups[0]["lr"] snake_case__ :Dict = epoch snake_case__ :List[Any] = overall_step accelerator.print(F'epoch {epoch}:' , __snake_case ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , F'state_{epoch}.json' ) , "w" ) as f: json.dump(__snake_case , __snake_case ) def lowercase_ ( ) -> Any: '''simple docstring''' snake_case__ :List[Any] = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." ) parser.add_argument( "--model_name_or_path" , type=__snake_case , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__snake_case , ) parser.add_argument( "--output_dir" , type=__snake_case , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , ) parser.add_argument( "--resume_from_checkpoint" , type=__snake_case , default=__snake_case , help="If the training should continue from a checkpoint folder." , ) parser.add_argument( "--partial_train_epoch" , type=__snake_case , default=__snake_case , help="If passed, the training will stop after this number of epochs." , ) parser.add_argument( "--num_epochs" , type=__snake_case , default=2 , help="Number of train epochs." , ) snake_case__ :Any = parser.parse_args() snake_case__ :int = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} training_function(__snake_case , __snake_case ) if __name__ == "__main__": main()
57
1
import json import os import unittest from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class _snake_case ( _A , unittest.TestCase ): _A = CTRLTokenizer _A = False _A = False def lowerCAmelCase_ ( self ) -> int: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt snake_case__ :Tuple = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"] snake_case__ :Dict = dict(zip(UpperCamelCase ,range(len(UpperCamelCase ) ) ) ) snake_case__ :Dict = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""] snake_case__ :Tuple = {"unk_token": "<unk>"} snake_case__ :Optional[int] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] ) snake_case__ :Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp: fp.write(json.dumps(UpperCamelCase ) + "\n" ) with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp: fp.write("\n".join(UpperCamelCase ) ) def lowerCAmelCase_ ( self ,**UpperCamelCase ) -> Tuple: kwargs.update(self.special_tokens_map ) return CTRLTokenizer.from_pretrained(self.tmpdirname ,**UpperCamelCase ) def lowerCAmelCase_ ( self ,UpperCamelCase ) -> int: snake_case__ :str = "adapt react readapt apt" snake_case__ :Union[str, Any] = "adapt react readapt apt" return input_text, output_text def lowerCAmelCase_ ( self ) -> str: snake_case__ :Dict = CTRLTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map ) snake_case__ :Dict = "adapt react readapt apt" snake_case__ :Tuple = "adapt re@@ a@@ c@@ t re@@ adapt apt".split() snake_case__ :str = tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase ,UpperCamelCase ) snake_case__ :Optional[Any] = tokens + [tokenizer.unk_token] snake_case__ :Tuple = [0, 1, 2, 4, 5, 1, 0, 3, 6] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) ,UpperCamelCase )
57
from __future__ import annotations class _snake_case : def __init__( self ,UpperCamelCase ) -> None: snake_case__ :Union[str, Any] = data snake_case__ :Node | None = None snake_case__ :Node | None = None def lowercase_ ( __snake_case : Node | None ) -> None: # In Order traversal of the tree '''simple docstring''' if tree: display(tree.left ) print(tree.data ) display(tree.right ) def lowercase_ ( __snake_case : Node | None ) -> int: '''simple docstring''' return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0 def lowercase_ ( __snake_case : Node ) -> bool: '''simple docstring''' if not tree: return True if tree.left and tree.right: return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right ) else: return not tree.left and not tree.right def lowercase_ ( ) -> None: # Main function for testing. '''simple docstring''' snake_case__ :Dict = Node(1 ) snake_case__ :int = Node(2 ) snake_case__ :Optional[Any] = Node(3 ) snake_case__ :Tuple = Node(4 ) snake_case__ :str = Node(5 ) snake_case__ :Optional[Any] = Node(6 ) snake_case__ :List[Any] = Node(7 ) snake_case__ :List[str] = Node(8 ) snake_case__ :Tuple = Node(9 ) print(is_full_binary_tree(__snake_case ) ) print(depth_of_tree(__snake_case ) ) print("Tree is: " ) display(__snake_case ) if __name__ == "__main__": main()
57
1
import inspect import unittest import warnings from math import ceil, floor from transformers import LevitConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_MAPPING, LevitForImageClassification, LevitForImageClassificationWithTeacher, LevitModel, ) from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class _snake_case ( _A ): def lowerCAmelCase_ ( self ) -> List[str]: snake_case__ :Optional[Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(UpperCamelCase ,"hidden_sizes" ) ) self.parent.assertTrue(hasattr(UpperCamelCase ,"num_attention_heads" ) ) class _snake_case : def __init__( self ,UpperCamelCase ,UpperCamelCase=13 ,UpperCamelCase=64 ,UpperCamelCase=3 ,UpperCamelCase=3 ,UpperCamelCase=2 ,UpperCamelCase=1 ,UpperCamelCase=16 ,UpperCamelCase=[128, 256, 384] ,UpperCamelCase=[4, 6, 8] ,UpperCamelCase=[2, 3, 4] ,UpperCamelCase=[16, 16, 16] ,UpperCamelCase=0 ,UpperCamelCase=[2, 2, 2] ,UpperCamelCase=[2, 2, 2] ,UpperCamelCase=0.02 ,UpperCamelCase=True ,UpperCamelCase=True ,UpperCamelCase=2 ,) -> List[Any]: snake_case__ :Union[str, Any] = parent snake_case__ :str = batch_size snake_case__ :Optional[Any] = image_size snake_case__ :str = num_channels snake_case__ :Tuple = kernel_size snake_case__ :str = stride snake_case__ :Optional[int] = padding snake_case__ :List[Any] = hidden_sizes snake_case__ :List[str] = num_attention_heads snake_case__ :Any = depths snake_case__ :Union[str, Any] = key_dim snake_case__ :List[Any] = drop_path_rate snake_case__ :List[Any] = patch_size snake_case__ :List[str] = attention_ratio snake_case__ :List[Any] = mlp_ratio snake_case__ :Optional[Any] = initializer_range snake_case__ :Union[str, Any] = [ ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] snake_case__ :List[Any] = is_training snake_case__ :Any = use_labels snake_case__ :str = num_labels snake_case__ :Union[str, Any] = initializer_range def lowerCAmelCase_ ( self ) -> Optional[int]: snake_case__ :Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case__ :Any = None if self.use_labels: snake_case__ :Union[str, Any] = ids_tensor([self.batch_size] ,self.num_labels ) snake_case__ :str = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ ( self ) -> List[Any]: return LevitConfig( image_size=self.image_size ,num_channels=self.num_channels ,kernel_size=self.kernel_size ,stride=self.stride ,padding=self.padding ,patch_size=self.patch_size ,hidden_sizes=self.hidden_sizes ,num_attention_heads=self.num_attention_heads ,depths=self.depths ,key_dim=self.key_dim ,drop_path_rate=self.drop_path_rate ,mlp_ratio=self.mlp_ratio ,attention_ratio=self.attention_ratio ,initializer_range=self.initializer_range ,down_ops=self.down_ops ,) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> int: snake_case__ :Dict = LevitModel(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ :int = model(UpperCamelCase ) snake_case__ :Tuple = (self.image_size, self.image_size) snake_case__ , snake_case__ :int = image_size[0], image_size[1] for _ in range(4 ): snake_case__ :int = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) snake_case__ :Optional[int] = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) ,) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Dict: snake_case__ :List[str] = self.num_labels snake_case__ :Any = LevitForImageClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ :str = model(UpperCamelCase ,labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self ) -> str: snake_case__ :Dict = self.prepare_config_and_inputs() snake_case__ , snake_case__ , snake_case__ :List[str] = config_and_inputs snake_case__ :int = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _snake_case ( _A , _A , unittest.TestCase ): _A = ( (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher) if is_torch_available() else () ) _A = ( { 'feature-extraction': LevitModel, 'image-classification': (LevitForImageClassification, LevitForImageClassificationWithTeacher), } if is_torch_available() else {} ) _A = False _A = False _A = False _A = False _A = False def lowerCAmelCase_ ( self ) -> Any: snake_case__ :List[str] = LevitModelTester(self ) snake_case__ :Tuple = ConfigTester(self ,config_class=UpperCamelCase ,has_text_modality=UpperCamelCase ,hidden_size=37 ) def lowerCAmelCase_ ( self ) -> Any: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCAmelCase_ ( self ) -> Dict: return @unittest.skip(reason="Levit does not use inputs_embeds" ) def lowerCAmelCase_ ( self ) -> Any: pass @unittest.skip(reason="Levit does not support input and output embeddings" ) def lowerCAmelCase_ ( self ) -> Dict: pass @unittest.skip(reason="Levit does not output attentions" ) def lowerCAmelCase_ ( self ) -> int: pass def lowerCAmelCase_ ( self ) -> List[Any]: snake_case__ , snake_case__ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ :Tuple = model_class(UpperCamelCase ) snake_case__ :Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case__ :Union[str, Any] = [*signature.parameters.keys()] snake_case__ :Optional[int] = ["pixel_values"] self.assertListEqual(arg_names[:1] ,UpperCamelCase ) def lowerCAmelCase_ ( self ) -> Optional[int]: def check_hidden_states_output(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ): snake_case__ :Union[str, Any] = model_class(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() with torch.no_grad(): snake_case__ :int = model(**self._prepare_for_class(UpperCamelCase ,UpperCamelCase ) ) snake_case__ :Tuple = outputs.hidden_states snake_case__ :str = len(self.model_tester.depths ) + 1 self.assertEqual(len(UpperCamelCase ) ,UpperCamelCase ) snake_case__ :Optional[Any] = (self.model_tester.image_size, self.model_tester.image_size) snake_case__ , snake_case__ :List[str] = image_size[0], image_size[1] for _ in range(4 ): snake_case__ :str = floor( ( (height + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) snake_case__ :List[str] = floor( ( (width + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[ height * width, self.model_tester.hidden_sizes[0], ] ,) snake_case__ , snake_case__ :Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ :Dict = True check_hidden_states_output(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case__ :Dict = True check_hidden_states_output(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def lowerCAmelCase_ ( self ) -> Optional[Any]: pass def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=False ) -> str: snake_case__ :Tuple = super()._prepare_for_class(UpperCamelCase ,UpperCamelCase ,return_labels=UpperCamelCase ) if return_labels: if model_class.__name__ == "LevitForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def lowerCAmelCase_ ( self ) -> Any: snake_case__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCAmelCase_ ( self ) -> Optional[Any]: snake_case__ :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase ) def lowerCAmelCase_ ( self ) -> List[str]: if not self.model_tester.is_training: return snake_case__ , snake_case__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ :int = True for model_class in self.all_model_classes: # LevitForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(UpperCamelCase ) or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue snake_case__ :Optional[int] = model_class(UpperCamelCase ) model.to(UpperCamelCase ) model.train() snake_case__ :Dict = self._prepare_for_class(UpperCamelCase ,UpperCamelCase ,return_labels=UpperCamelCase ) snake_case__ :List[Any] = model(**UpperCamelCase ).loss loss.backward() def lowerCAmelCase_ ( self ) -> Dict: snake_case__ , snake_case__ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return snake_case__ :int = False snake_case__ :Any = True for model_class in self.all_model_classes: if model_class in get_values(UpperCamelCase ) or not model_class.supports_gradient_checkpointing: continue # LevitForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "LevitForImageClassificationWithTeacher": continue snake_case__ :str = model_class(UpperCamelCase ) model.gradient_checkpointing_enable() model.to(UpperCamelCase ) model.train() snake_case__ :Any = self._prepare_for_class(UpperCamelCase ,UpperCamelCase ,return_labels=UpperCamelCase ) snake_case__ :List[Any] = model(**UpperCamelCase ).loss loss.backward() def lowerCAmelCase_ ( self ) -> Optional[Any]: snake_case__ , snake_case__ :Dict = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ :Dict = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(UpperCamelCase ), ] or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f'Testing {model_class} with {problem_type["title"]}' ): snake_case__ :int = problem_type["title"] snake_case__ :Union[str, Any] = problem_type["num_labels"] snake_case__ :Any = model_class(UpperCamelCase ) model.to(UpperCamelCase ) model.train() snake_case__ :Optional[int] = self._prepare_for_class(UpperCamelCase ,UpperCamelCase ,return_labels=UpperCamelCase ) if problem_type["num_labels"] > 1: snake_case__ :Optional[int] = inputs["labels"].unsqueeze(1 ).repeat(1 ,problem_type["num_labels"] ) snake_case__ :Dict = inputs["labels"].to(problem_type["dtype"] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=UpperCamelCase ) as warning_list: snake_case__ :Optional[int] = model(**UpperCamelCase ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( f'Something is going wrong in the regression problem: intercepted {w.message}' ) loss.backward() @slow def lowerCAmelCase_ ( self ) -> Optional[int]: for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ :List[Any] = LevitModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) def lowercase_ ( ) -> Dict: '''simple docstring''' snake_case__ :Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _snake_case ( unittest.TestCase ): @cached_property def lowerCAmelCase_ ( self ) -> Tuple: return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def lowerCAmelCase_ ( self ) -> Any: snake_case__ :Dict = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( UpperCamelCase ) snake_case__ :Union[str, Any] = self.default_image_processor snake_case__ :Optional[int] = prepare_img() snake_case__ :int = image_processor(images=UpperCamelCase ,return_tensors="pt" ).to(UpperCamelCase ) # forward pass with torch.no_grad(): snake_case__ :Optional[int] = model(**UpperCamelCase ) # verify the logits snake_case__ :Union[str, Any] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape ,UpperCamelCase ) snake_case__ :str = torch.tensor([1.0448, -0.3745, -1.8317] ).to(UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,UpperCamelCase ,atol=1E-4 ) )
57
import os try: from .build_directory_md import good_file_paths except ImportError: from build_directory_md import good_file_paths # type: ignore __UpperCAmelCase : List[Any] = list(good_file_paths()) assert filepaths, "good_file_paths() failed!" __UpperCAmelCase : int = [file for file in filepaths if file != file.lower()] if upper_files: print(F'''{len(upper_files)} files contain uppercase characters:''') print("\n".join(upper_files) + "\n") __UpperCAmelCase : Any = [file for file in filepaths if " " in file] if space_files: print(F'''{len(space_files)} files contain space characters:''') print("\n".join(space_files) + "\n") __UpperCAmelCase : str = [file for file in filepaths if "-" in file] if hyphen_files: print(F'''{len(hyphen_files)} files contain hyphen characters:''') print("\n".join(hyphen_files) + "\n") __UpperCAmelCase : Dict = [file for file in filepaths if os.sep not in file] if nodir_files: print(F'''{len(nodir_files)} files are not in a directory:''') print("\n".join(nodir_files) + "\n") __UpperCAmelCase : int = len(upper_files + space_files + hyphen_files + nodir_files) if bad_files: import sys sys.exit(bad_files)
57
1
from abc import ABC, abstractmethod from argparse import ArgumentParser class _snake_case ( _A ): @staticmethod @abstractmethod def lowerCAmelCase_ ( UpperCamelCase ) -> Optional[int]: raise NotImplementedError() @abstractmethod def lowerCAmelCase_ ( self ) -> Union[str, Any]: raise NotImplementedError()
57
def lowercase_ ( __snake_case : Tuple , __snake_case : Optional[int] ) -> List[Any]: '''simple docstring''' snake_case__ :Dict = "" for i in table: res += inp[i - 1] return res def lowercase_ ( __snake_case : List[str] ) -> int: '''simple docstring''' return data[1:] + data[0] def lowercase_ ( __snake_case : int , __snake_case : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' snake_case__ :Union[str, Any] = "" for i in range(len(__snake_case ) ): if a[i] == b[i]: res += "0" else: res += "1" return res def lowercase_ ( __snake_case : Optional[int] , __snake_case : Dict ) -> Union[str, Any]: '''simple docstring''' snake_case__ :int = int("0b" + data[0] + data[-1] , 2 ) snake_case__ :Union[str, Any] = int("0b" + data[1:3] , 2 ) return bin(s[row][col] )[2:] def lowercase_ ( __snake_case : Dict , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : List[Any] , __snake_case : Optional[int] ) -> List[str]: '''simple docstring''' snake_case__ :Tuple = message[:4] snake_case__ :int = message[4:] snake_case__ :int = apply_table(__snake_case , __snake_case ) snake_case__ :Union[str, Any] = xor(__snake_case , __snake_case ) snake_case__ :Tuple = apply_sbox(__snake_case , temp[:4] ) # noqa: E741 snake_case__ :List[str] = apply_sbox(__snake_case , temp[4:] ) snake_case__ :int = "0" * (2 - len(__snake_case )) + l # noqa: E741 snake_case__ :int = "0" * (2 - len(__snake_case )) + r snake_case__ :Optional[Any] = apply_table(l + r , __snake_case ) snake_case__ :Tuple = xor(__snake_case , __snake_case ) return temp + right if __name__ == "__main__": __UpperCAmelCase : Dict = input("Enter 10 bit key: ") __UpperCAmelCase : Tuple = input("Enter 8 bit message: ") __UpperCAmelCase : Any = [6, 3, 7, 4, 8, 5, 1_0, 9] __UpperCAmelCase : List[str] = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6] __UpperCAmelCase : Tuple = [2, 4, 3, 1] __UpperCAmelCase : List[Any] = [2, 6, 3, 1, 4, 8, 5, 7] __UpperCAmelCase : Optional[Any] = [4, 1, 3, 5, 7, 2, 8, 6] __UpperCAmelCase : Optional[int] = [4, 1, 2, 3, 2, 3, 4, 1] __UpperCAmelCase : List[Any] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]] __UpperCAmelCase : Union[str, Any] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]] # key generation __UpperCAmelCase : int = apply_table(key, paa_table) __UpperCAmelCase : Dict = temp[:5] __UpperCAmelCase : Optional[int] = temp[5:] __UpperCAmelCase : Optional[int] = left_shift(left) __UpperCAmelCase : Union[str, Any] = left_shift(right) __UpperCAmelCase : int = apply_table(left + right, pa_table) __UpperCAmelCase : Tuple = left_shift(left) __UpperCAmelCase : Union[str, Any] = left_shift(right) __UpperCAmelCase : Dict = left_shift(left) __UpperCAmelCase : Optional[Any] = left_shift(right) __UpperCAmelCase : Optional[int] = apply_table(left + right, pa_table) # encryption __UpperCAmelCase : Tuple = apply_table(message, IP) __UpperCAmelCase : Tuple = function(expansion, sa, sa, keya, temp) __UpperCAmelCase : List[Any] = temp[4:] + temp[:4] __UpperCAmelCase : int = function(expansion, sa, sa, keya, temp) __UpperCAmelCase : Union[str, Any] = apply_table(temp, IP_inv) print("Cipher text is:", CT) # decryption __UpperCAmelCase : List[Any] = apply_table(CT, IP) __UpperCAmelCase : List[Any] = function(expansion, sa, sa, keya, temp) __UpperCAmelCase : int = temp[4:] + temp[:4] __UpperCAmelCase : Union[str, Any] = function(expansion, sa, sa, keya, temp) __UpperCAmelCase : Union[str, Any] = apply_table(temp, IP_inv) print("Plain text after decypting is:", PT)
57
1
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import platform import numpy as np import psutil import torch from accelerate import __version__ as version from accelerate.commands.config import default_config_file, load_config_from_file from ..utils import is_npu_available, is_xpu_available def lowercase_ ( __snake_case : Optional[Any]=None ) -> str: '''simple docstring''' if subparsers is not None: snake_case__ :Optional[Any] = subparsers.add_parser("env" ) else: snake_case__ :List[str] = argparse.ArgumentParser("Accelerate env command" ) parser.add_argument( "--config_file" , default=__snake_case , help="The config file to use for the default values in the launching script." ) if subparsers is not None: parser.set_defaults(func=__snake_case ) return parser def lowercase_ ( __snake_case : int ) -> int: '''simple docstring''' snake_case__ :Dict = torch.__version__ snake_case__ :int = torch.cuda.is_available() snake_case__ :str = is_xpu_available() snake_case__ :List[str] = is_npu_available() snake_case__ :List[str] = "Not found" # Get the default from the config file. if args.config_file is not None or os.path.isfile(__snake_case ): snake_case__ :List[Any] = load_config_from_file(args.config_file ).to_dict() snake_case__ :Union[str, Any] = { "`Accelerate` version": version, "Platform": platform.platform(), "Python version": platform.python_version(), "Numpy version": np.__version__, "PyTorch version (GPU?)": F'{pt_version} ({pt_cuda_available})', "PyTorch XPU available": str(__snake_case ), "PyTorch NPU available": str(__snake_case ), "System RAM": F'{psutil.virtual_memory().total / 10_24 ** 3:.2f} GB', } if pt_cuda_available: snake_case__ :Dict = torch.cuda.get_device_name() print("\nCopy-and-paste the text below in your GitHub issue\n" ) print("\n".join([F'- {prop}: {val}' for prop, val in info.items()] ) ) print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:" ) snake_case__ :List[str] = ( "\n".join([F'\t- {prop}: {val}' for prop, val in accelerate_config.items()] ) if isinstance(__snake_case , __snake_case ) else F'\t{accelerate_config}' ) print(__snake_case ) snake_case__ :str = accelerate_config return info def lowercase_ ( ) -> int: '''simple docstring''' snake_case__ :Union[str, Any] = env_command_parser() snake_case__ :Union[str, Any] = parser.parse_args() env_command(__snake_case ) return 0 if __name__ == "__main__": raise SystemExit(main())
57
import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class _snake_case ( _A , _A , _A ): @register_to_config def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = False ,) -> int: super().__init__() snake_case__ :Union[str, Any] = nn.Embedding(UpperCamelCase ,UpperCamelCase ) snake_case__ :int = nn.Embedding(UpperCamelCase ,UpperCamelCase ) snake_case__ :Any = False snake_case__ :List[Any] = nn.Dropout(p=UpperCamelCase ) snake_case__ :Tuple = TaConfig( vocab_size=UpperCamelCase ,d_model=UpperCamelCase ,num_heads=UpperCamelCase ,d_kv=UpperCamelCase ,d_ff=UpperCamelCase ,dropout_rate=UpperCamelCase ,feed_forward_proj=UpperCamelCase ,is_decoder=UpperCamelCase ,is_encoder_decoder=UpperCamelCase ,) snake_case__ :List[str] = nn.ModuleList() for lyr_num in range(UpperCamelCase ): snake_case__ :List[Any] = TaBlock(UpperCamelCase ) self.encoders.append(UpperCamelCase ) snake_case__ :Optional[Any] = TaLayerNorm(UpperCamelCase ) snake_case__ :Any = nn.Dropout(p=UpperCamelCase ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> int: snake_case__ :str = self.token_embedder(UpperCamelCase ) snake_case__ :int = encoder_input_tokens.shape[1] snake_case__ :List[Any] = torch.arange(UpperCamelCase ,device=encoder_input_tokens.device ) x += self.position_encoding(UpperCamelCase ) snake_case__ :Optional[int] = self.dropout_pre(UpperCamelCase ) # inverted the attention mask snake_case__ :Optional[Any] = encoder_input_tokens.size() snake_case__ :Dict = self.get_extended_attention_mask(UpperCamelCase ,UpperCamelCase ) for lyr in self.encoders: snake_case__ :str = lyr(UpperCamelCase ,UpperCamelCase )[0] snake_case__ :List[Any] = self.layer_norm(UpperCamelCase ) return self.dropout_post(UpperCamelCase ), encoder_inputs_mask
57
1
import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def lowercase_ ( __snake_case : Optional[int] ) -> Optional[int]: '''simple docstring''' if ( (cp >= 0X4_e00 and cp <= 0X9_fff) or (cp >= 0X3_400 and cp <= 0X4_dbf) # or (cp >= 0X20_000 and cp <= 0X2a_6df) # or (cp >= 0X2a_700 and cp <= 0X2b_73f) # or (cp >= 0X2b_740 and cp <= 0X2b_81f) # or (cp >= 0X2b_820 and cp <= 0X2c_eaf) # or (cp >= 0Xf_900 and cp <= 0Xf_aff) or (cp >= 0X2f_800 and cp <= 0X2f_a1f) # ): # return True return False def lowercase_ ( __snake_case : str ) -> Union[str, Any]: '''simple docstring''' for char in word: snake_case__ :Optional[Any] = ord(__snake_case ) if not _is_chinese_char(__snake_case ): return 0 return 1 def lowercase_ ( __snake_case : List[str] ) -> Any: '''simple docstring''' snake_case__ :List[Any] = set() for token in tokens: snake_case__ :Optional[Any] = len(__snake_case ) > 1 and is_chinese(__snake_case ) if chinese_word: word_set.add(__snake_case ) snake_case__ :List[str] = list(__snake_case ) return word_list def lowercase_ ( __snake_case : List[str] , __snake_case : set() ) -> Dict: '''simple docstring''' if not chinese_word_set: return bert_tokens snake_case__ :Dict = max([len(__snake_case ) for w in chinese_word_set] ) snake_case__ :int = bert_tokens snake_case__ , snake_case__ :Optional[Any] = 0, len(__snake_case ) while start < end: snake_case__ :int = True if is_chinese(bert_word[start] ): snake_case__ :Any = min(end - start , __snake_case ) for i in range(__snake_case , 1 , -1 ): snake_case__ :List[Any] = "".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): snake_case__ :int = "##" + bert_word[j] snake_case__ :Optional[int] = start + i snake_case__ :Tuple = False break if single_word: start += 1 return bert_word def lowercase_ ( __snake_case : List[str] , __snake_case : LTP , __snake_case : BertTokenizer ) -> Optional[Any]: '''simple docstring''' snake_case__ :List[str] = [] for i in range(0 , len(__snake_case ) , 1_00 ): snake_case__ :Dict = ltp_tokenizer.seg(lines[i : i + 1_00] )[0] snake_case__ :Dict = [get_chinese_word(__snake_case ) for r in res] ltp_res.extend(__snake_case ) assert len(__snake_case ) == len(__snake_case ) snake_case__ :List[Any] = [] for i in range(0 , len(__snake_case ) , 1_00 ): snake_case__ :List[str] = bert_tokenizer(lines[i : i + 1_00] , add_special_tokens=__snake_case , truncation=__snake_case , max_length=5_12 ) bert_res.extend(res["input_ids"] ) assert len(__snake_case ) == len(__snake_case ) snake_case__ :Optional[Any] = [] for input_ids, chinese_word in zip(__snake_case , __snake_case ): snake_case__ :str = [] for id in input_ids: snake_case__ :List[Any] = bert_tokenizer._convert_id_to_token(__snake_case ) input_tokens.append(__snake_case ) snake_case__ :Union[str, Any] = add_sub_symbol(__snake_case , __snake_case ) snake_case__ :Optional[int] = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. for i, token in enumerate(__snake_case ): if token[:2] == "##": snake_case__ :Dict = token[2:] # save chinese tokens' pos if len(__snake_case ) == 1 and _is_chinese_char(ord(__snake_case ) ): ref_id.append(__snake_case ) ref_ids.append(__snake_case ) assert len(__snake_case ) == len(__snake_case ) return ref_ids def lowercase_ ( __snake_case : List[Any] ) -> List[str]: '''simple docstring''' with open(args.file_name , "r" , encoding="utf-8" ) as f: snake_case__ :Dict = f.readlines() snake_case__ :Dict = [line.strip() for line in data if len(__snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' snake_case__ :str = LTP(args.ltp ) # faster in GPU device snake_case__ :List[str] = BertTokenizer.from_pretrained(args.bert ) snake_case__ :List[Any] = prepare_ref(__snake_case , __snake_case , __snake_case ) with open(args.save_path , "w" , encoding="utf-8" ) as f: snake_case__ :int = [json.dumps(__snake_case ) + "\n" for ref in ref_ids] f.writelines(__snake_case ) if __name__ == "__main__": __UpperCAmelCase : Optional[int] = argparse.ArgumentParser(description="prepare_chinese_ref") parser.add_argument( "--file_name", type=str, default="./resources/chinese-demo.txt", help="file need process, same as training data in lm", ) parser.add_argument( "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path" ) parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer") parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res") __UpperCAmelCase : List[Any] = parser.parse_args() main(args)
57
__UpperCAmelCase : int = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []} __UpperCAmelCase : List[str] = ["a", "b", "c", "d", "e"] def lowercase_ ( __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : Tuple ) -> Optional[int]: '''simple docstring''' snake_case__ :List[Any] = start # add current to visited visited.append(__snake_case ) snake_case__ :List[str] = edges[current] for neighbor in neighbors: # if neighbor not in visited, visit if neighbor not in visited: snake_case__ :Any = topological_sort(__snake_case , __snake_case , __snake_case ) # if all neighbors visited add current to sort sort.append(__snake_case ) # if all vertices haven't been visited select a new one to visit if len(__snake_case ) != len(__snake_case ): for vertice in vertices: if vertice not in visited: snake_case__ :Any = topological_sort(__snake_case , __snake_case , __snake_case ) # return sort return sort if __name__ == "__main__": __UpperCAmelCase : Tuple = topological_sort("a", [], []) print(sort)
57
1
import os try: from .build_directory_md import good_file_paths except ImportError: from build_directory_md import good_file_paths # type: ignore __UpperCAmelCase : List[Any] = list(good_file_paths()) assert filepaths, "good_file_paths() failed!" __UpperCAmelCase : int = [file for file in filepaths if file != file.lower()] if upper_files: print(F'''{len(upper_files)} files contain uppercase characters:''') print("\n".join(upper_files) + "\n") __UpperCAmelCase : Any = [file for file in filepaths if " " in file] if space_files: print(F'''{len(space_files)} files contain space characters:''') print("\n".join(space_files) + "\n") __UpperCAmelCase : str = [file for file in filepaths if "-" in file] if hyphen_files: print(F'''{len(hyphen_files)} files contain hyphen characters:''') print("\n".join(hyphen_files) + "\n") __UpperCAmelCase : Dict = [file for file in filepaths if os.sep not in file] if nodir_files: print(F'''{len(nodir_files)} files are not in a directory:''') print("\n".join(nodir_files) + "\n") __UpperCAmelCase : int = len(upper_files + space_files + hyphen_files + nodir_files) if bad_files: import sys sys.exit(bad_files)
57
import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class _snake_case ( unittest.TestCase ): def lowerCAmelCase_ ( self ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() def lowerCAmelCase_ ( self ) -> str: snake_case__ , snake_case__ :Tuple = FlaxControlNetModel.from_pretrained( "lllyasviel/sd-controlnet-canny" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa ) snake_case__ , snake_case__ :Any = FlaxStableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa ) snake_case__ :List[str] = controlnet_params snake_case__ :Union[str, Any] = "bird" snake_case__ :Optional[int] = jax.device_count() snake_case__ :Tuple = pipe.prepare_text_inputs([prompts] * num_samples ) snake_case__ :Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) snake_case__ :str = pipe.prepare_image_inputs([canny_image] * num_samples ) snake_case__ :List[str] = jax.random.PRNGKey(0 ) snake_case__ :str = jax.random.split(UpperCamelCase ,jax.device_count() ) snake_case__ :int = replicate(UpperCamelCase ) snake_case__ :Any = shard(UpperCamelCase ) snake_case__ :Any = shard(UpperCamelCase ) snake_case__ :str = pipe( prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) snake_case__ :List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) snake_case__ :Any = images[0, 253:256, 253:256, -1] snake_case__ :Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case__ :List[Any] = jnp.array( [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] ) print(f'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def lowerCAmelCase_ ( self ) -> Optional[int]: snake_case__ , snake_case__ :List[str] = FlaxControlNetModel.from_pretrained( "lllyasviel/sd-controlnet-openpose" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa ) snake_case__ , snake_case__ :Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa ) snake_case__ :str = controlnet_params snake_case__ :int = "Chef in the kitchen" snake_case__ :List[Any] = jax.device_count() snake_case__ :Dict = pipe.prepare_text_inputs([prompts] * num_samples ) snake_case__ :Any = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" ) snake_case__ :Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples ) snake_case__ :List[str] = jax.random.PRNGKey(0 ) snake_case__ :Any = jax.random.split(UpperCamelCase ,jax.device_count() ) snake_case__ :Dict = replicate(UpperCamelCase ) snake_case__ :Tuple = shard(UpperCamelCase ) snake_case__ :Optional[int] = shard(UpperCamelCase ) snake_case__ :Optional[Any] = pipe( prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) snake_case__ :int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) snake_case__ :List[str] = images[0, 253:256, 253:256, -1] snake_case__ :Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case__ :List[str] = jnp.array( [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] ) print(f'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
57
1
import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() __UpperCAmelCase : Any = logging.get_logger(__name__) __UpperCAmelCase : Optional[Any] = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } __UpperCAmelCase : List[Any] = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def lowercase_ ( __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : int , __snake_case : Union[str, Any] ) -> str: '''simple docstring''' for attribute in key.split("." ): snake_case__ :Union[str, Any] = getattr(__snake_case , __snake_case ) if weight_type is not None: snake_case__ :Optional[Any] = getattr(__snake_case , __snake_case ).shape else: snake_case__ :str = hf_pointer.shape assert hf_shape == value.shape, ( F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' F' {value.shape} for {full_name}' ) if weight_type == "weight": snake_case__ :Optional[int] = value elif weight_type == "weight_g": snake_case__ :Union[str, Any] = value elif weight_type == "weight_v": snake_case__ :Any = value elif weight_type == "bias": snake_case__ :int = value else: snake_case__ :Optional[int] = value logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' ) def lowercase_ ( __snake_case : Any , __snake_case : Optional[int] ) -> List[str]: '''simple docstring''' snake_case__ :Any = [] snake_case__ :Any = fairseq_model.state_dict() snake_case__ :Tuple = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight snake_case__ :str = None for name, value in fairseq_dict.items(): snake_case__ :str = False if "conv_layers" in name: load_conv_layer( __snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == "group" , ) snake_case__ :int = True elif name.split("." )[0] == "proj": snake_case__ :Tuple = fairseq_model.proj snake_case__ :Optional[Any] = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: snake_case__ :Optional[int] = True if "*" in mapped_key: snake_case__ :Tuple = name.split(__snake_case )[0].split("." )[-2] snake_case__ :Tuple = mapped_key.replace("*" , __snake_case ) if "weight_g" in name: snake_case__ :Dict = "weight_g" elif "weight_v" in name: snake_case__ :Union[str, Any] = "weight_v" elif "bias" in name: snake_case__ :Optional[int] = "bias" elif "weight" in name: snake_case__ :List[Any] = "weight" else: snake_case__ :Any = None set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) continue if not is_used: unused_weights.append(__snake_case ) logger.warning(F'Unused weights: {unused_weights}' ) return proj_weight def lowercase_ ( __snake_case : Any , __snake_case : Any , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : Dict ) -> List[str]: '''simple docstring''' snake_case__ :Tuple = full_name.split("conv_layers." )[-1] snake_case__ :Optional[int] = name.split("." ) snake_case__ :Tuple = int(items[0] ) snake_case__ :List[str] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) snake_case__ :Tuple = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) snake_case__ :List[Any] = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was' " found." ) snake_case__ :Any = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'{full_name} has size {value.shape}, but' F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) snake_case__ :Dict = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) else: unused_weights.append(__snake_case ) def lowercase_ ( __snake_case : int ) -> Dict: '''simple docstring''' snake_case__ , snake_case__ :Optional[int] = emb.weight.shape snake_case__ :Union[str, Any] = nn.Linear(__snake_case , __snake_case , bias=__snake_case ) snake_case__ :str = emb.weight.data return lin_layer def lowercase_ ( __snake_case : Any ) -> Optional[Any]: '''simple docstring''' with open(__snake_case , "r" , encoding="utf-8" ) as f: snake_case__ :Union[str, Any] = f.readlines() snake_case__ :Dict = [line.split(" " )[0] for line in lines] snake_case__ :Union[str, Any] = len(__snake_case ) snake_case__ :int = { "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, } vocab_dict.update(dict(zip(__snake_case , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def lowercase_ ( __snake_case : Dict , __snake_case : Tuple , __snake_case : List[str] , __snake_case : Any , __snake_case : Tuple , __snake_case : int , __snake_case : List[str] , ) -> str: '''simple docstring''' snake_case__ :List[str] = WavaVecaConfig.from_pretrained(__snake_case ) snake_case__ :Optional[int] = SpeechaTextaConfig.from_pretrained( __snake_case , vocab_size=__snake_case , decoder_layers=__snake_case , do_stable_layer_norm=__snake_case ) snake_case__ :Dict = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__snake_case , return_attention_mask=__snake_case , ) snake_case__ , snake_case__ , snake_case__ :Any = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) snake_case__ :Optional[int] = model[0].eval() # set weights for wav2vec2 encoder snake_case__ :str = WavaVecaModel(__snake_case ) snake_case__ :Optional[int] = recursively_load_weights_wavaveca(model.encoder , __snake_case ) snake_case__ :Union[str, Any] = SpeechaTextaForCausalLM(__snake_case ) snake_case__ , snake_case__ :Optional[int] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__snake_case ) # set output linear layer unexpected_keys.remove("embed_out" ) snake_case__ :int = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(F'The following keys are missing when loading the decoder weights: {missing_keys}' ) logger.warning(F'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' ) snake_case__ :List[Any] = SpeechEncoderDecoderModel(encoder=__snake_case , decoder=__snake_case ) snake_case__ :List[Any] = False # add projection layer snake_case__ :int = nn.Parameter(projection_layer.weight ) snake_case__ :Optional[int] = nn.Parameter(projection_layer.bias ) snake_case__ :Union[str, Any] = create_vocab_dict(__snake_case ) with open(os.path.join(__snake_case , "vocab.json" ) , "w" ) as fp: json.dump(__snake_case , __snake_case ) snake_case__ :Tuple = SpeechaTextaTokenizer(os.path.join(__snake_case , "vocab.json" ) ) tokenizer.save_pretrained(__snake_case ) snake_case__ :Dict = hf_wavavec.config.to_dict() snake_case__ :int = tokenizer.pad_token_id snake_case__ :Union[str, Any] = tokenizer.bos_token_id snake_case__ :Tuple = tokenizer.eos_token_id snake_case__ :Optional[Any] = "speech_to_text_2" snake_case__ :Optional[int] = "wav2vec2" snake_case__ :Union[str, Any] = SpeechEncoderDecoderConfig.from_dict(__snake_case ) hf_wavavec.save_pretrained(__snake_case ) feature_extractor.save_pretrained(__snake_case ) if __name__ == "__main__": __UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument( "--encoder_config_path", default="facebook/wav2vec2-large-lv60", type=str, help="Path to hf encoder wav2vec2 checkpoint config", ) parser.add_argument( "--decoder_config_path", default="facebook/s2t-small-mustc-en-fr-st", type=str, help="Path to hf decoder s2t checkpoint config", ) parser.add_argument("--vocab_size", default=1_0_2_2_4, type=int, help="Vocab size of decoder") parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers") __UpperCAmelCase : Union[str, Any] = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
57
def lowercase_ ( __snake_case : list ) -> list: '''simple docstring''' if any(not isinstance(__snake_case , __snake_case ) or x < 0 for x in sequence ): raise TypeError("Sequence must be list of non-negative integers" ) for _ in range(len(__snake_case ) ): for i, (rod_upper, rod_lower) in enumerate(zip(__snake_case , sequence[1:] ) ): if rod_upper > rod_lower: sequence[i] -= rod_upper - rod_lower sequence[i + 1] += rod_upper - rod_lower return sequence if __name__ == "__main__": assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
57
1
__UpperCAmelCase : Any = "0.18.2" from .configuration_utils import ConfigMixin from .utils import ( OptionalDependencyNotAvailable, is_flax_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, is_transformers_version, is_unidecode_available, logging, ) try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AutoencoderKL, ControlNetModel, ModelMixin, PriorTransformer, TaFilmDecoder, TransformeraDModel, UNetaDModel, UNetaDConditionModel, UNetaDModel, UNetaDConditionModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, ) from .schedulers import ( CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, KDPMaAncestralDiscreteScheduler, KDPMaDiscreteScheduler, PNDMScheduler, RePaintScheduler, SchedulerMixin, ScoreSdeVeScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImgaImgPipeline, AltDiffusionPipeline, AudioLDMPipeline, CycleDiffusionPipeline, IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, KandinskyImgaImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaControlnetPipeline, KandinskyVaaImgaImgPipeline, KandinskyVaaInpaintPipeline, KandinskyVaaPipeline, KandinskyVaaPriorEmbaEmbPipeline, KandinskyVaaPriorPipeline, LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, ShapEImgaImgPipeline, ShapEPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImgaImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImgaImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPixaPixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDMaDPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPixaPixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImgaImgPipeline, StableUnCLIPPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403 else: from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImgaImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxDPMSolverMultistepScheduler, FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImgaImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor
57
from __future__ import annotations def lowercase_ ( __snake_case : list ) -> float: '''simple docstring''' if not nums: raise ValueError("List is empty" ) return sum(__snake_case ) / len(__snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
57
1
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class _snake_case ( unittest.TestCase ): def lowerCAmelCase_ ( self ) -> Optional[int]: snake_case__ :Tuple = tempfile.mkdtemp() # fmt: off snake_case__ :List[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on snake_case__ :Optional[Any] = dict(zip(UpperCamelCase ,range(len(UpperCamelCase ) ) ) ) snake_case__ :Any = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] snake_case__ :Optional[int] = {"unk_token": "<unk>"} snake_case__ :List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] ) snake_case__ :List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp: fp.write(json.dumps(UpperCamelCase ) + "\n" ) with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp: fp.write("\n".join(UpperCamelCase ) ) snake_case__ :Optional[Any] = { "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.48145466, 0.4578275, 0.40821073], "image_std": [0.26862954, 0.26130258, 0.27577711], } snake_case__ :int = os.path.join(self.tmpdirname ,UpperCamelCase ) with open(self.image_processor_file ,"w" ,encoding="utf-8" ) as fp: json.dump(UpperCamelCase ,UpperCamelCase ) def lowerCAmelCase_ ( self ,**UpperCamelCase ) -> Optional[Any]: return CLIPTokenizer.from_pretrained(self.tmpdirname ,**UpperCamelCase ) def lowerCAmelCase_ ( self ,**UpperCamelCase ) -> List[Any]: return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**UpperCamelCase ) def lowerCAmelCase_ ( self ,**UpperCamelCase ) -> List[str]: return CLIPImageProcessor.from_pretrained(self.tmpdirname ,**UpperCamelCase ) def lowerCAmelCase_ ( self ) -> Dict: shutil.rmtree(self.tmpdirname ) def lowerCAmelCase_ ( self ) -> str: snake_case__ :Optional[int] = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )] snake_case__ :Optional[int] = [Image.fromarray(np.moveaxis(UpperCamelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def lowerCAmelCase_ ( self ) -> Optional[Any]: snake_case__ :str = self.get_tokenizer() snake_case__ :Union[str, Any] = self.get_rust_tokenizer() snake_case__ :Union[str, Any] = self.get_image_processor() snake_case__ :Any = CLIPProcessor(tokenizer=UpperCamelCase ,image_processor=UpperCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) snake_case__ :Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=UpperCamelCase ) snake_case__ :Optional[int] = CLIPProcessor(tokenizer=UpperCamelCase ,image_processor=UpperCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) snake_case__ :str = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer ,UpperCamelCase ) self.assertIsInstance(processor_fast.tokenizer ,UpperCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor ,UpperCamelCase ) self.assertIsInstance(processor_fast.image_processor ,UpperCamelCase ) def lowerCAmelCase_ ( self ) -> Any: snake_case__ :Optional[int] = CLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) snake_case__ :Dict = self.get_tokenizer(bos_token="(BOS)" ,eos_token="(EOS)" ) snake_case__ :Optional[Any] = self.get_image_processor(do_normalize=UpperCamelCase ,padding_value=1.0 ) snake_case__ :Any = CLIPProcessor.from_pretrained( self.tmpdirname ,bos_token="(BOS)" ,eos_token="(EOS)" ,do_normalize=UpperCamelCase ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,UpperCamelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,UpperCamelCase ) def lowerCAmelCase_ ( self ) -> Tuple: snake_case__ :Any = self.get_image_processor() snake_case__ :Dict = self.get_tokenizer() snake_case__ :List[str] = CLIPProcessor(tokenizer=UpperCamelCase ,image_processor=UpperCamelCase ) snake_case__ :List[Any] = self.prepare_image_inputs() snake_case__ :Optional[Any] = image_processor(UpperCamelCase ,return_tensors="np" ) snake_case__ :Dict = processor(images=UpperCamelCase ,return_tensors="np" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 ) def lowerCAmelCase_ ( self ) -> List[Any]: snake_case__ :Tuple = self.get_image_processor() snake_case__ :int = self.get_tokenizer() snake_case__ :Dict = CLIPProcessor(tokenizer=UpperCamelCase ,image_processor=UpperCamelCase ) snake_case__ :Optional[int] = "lower newer" snake_case__ :List[str] = processor(text=UpperCamelCase ) snake_case__ :Optional[Any] = tokenizer(UpperCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def lowerCAmelCase_ ( self ) -> Optional[int]: snake_case__ :Tuple = self.get_image_processor() snake_case__ :Optional[Any] = self.get_tokenizer() snake_case__ :List[Any] = CLIPProcessor(tokenizer=UpperCamelCase ,image_processor=UpperCamelCase ) snake_case__ :str = "lower newer" snake_case__ :Dict = self.prepare_image_inputs() snake_case__ :int = processor(text=UpperCamelCase ,images=UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,["input_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase ): processor() def lowerCAmelCase_ ( self ) -> List[Any]: snake_case__ :int = self.get_image_processor() snake_case__ :Optional[Any] = self.get_tokenizer() snake_case__ :Any = CLIPProcessor(tokenizer=UpperCamelCase ,image_processor=UpperCamelCase ) snake_case__ :int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] snake_case__ :Any = processor.batch_decode(UpperCamelCase ) snake_case__ :Dict = tokenizer.batch_decode(UpperCamelCase ) self.assertListEqual(UpperCamelCase ,UpperCamelCase ) def lowerCAmelCase_ ( self ) -> int: snake_case__ :Any = self.get_image_processor() snake_case__ :Any = self.get_tokenizer() snake_case__ :Dict = CLIPProcessor(tokenizer=UpperCamelCase ,image_processor=UpperCamelCase ) snake_case__ :Dict = "lower newer" snake_case__ :Any = self.prepare_image_inputs() snake_case__ :str = processor(text=UpperCamelCase ,images=UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
57
from __future__ import annotations import math def lowercase_ ( __snake_case : int , __snake_case : int , __snake_case : bool , __snake_case : list[int] , __snake_case : float ) -> int: '''simple docstring''' if depth < 0: raise ValueError("Depth cannot be less than 0" ) if len(__snake_case ) == 0: raise ValueError("Scores cannot be empty" ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) , ) return min( minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) , ) def lowercase_ ( ) -> None: '''simple docstring''' snake_case__ :List[Any] = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23] snake_case__ :int = math.log(len(__snake_case ) , 2 ) print("Optimal value : " , end="" ) print(minimax(0 , 0 , __snake_case , __snake_case , __snake_case ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
57
1
import numpy as np def lowercase_ ( __snake_case : np.array ) -> np.array: '''simple docstring''' return (2 / (1 + np.exp(-2 * vector ))) - 1 if __name__ == "__main__": import doctest doctest.testmod()
57
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __UpperCAmelCase : Optional[Any] = logging.get_logger(__name__) def lowercase_ ( __snake_case : Any , __snake_case : Any ) -> Any: '''simple docstring''' snake_case__ :Optional[Any] = b.T snake_case__ :Optional[Any] = np.sum(np.square(__snake_case ) , axis=1 ) snake_case__ :Tuple = np.sum(np.square(__snake_case ) , axis=0 ) snake_case__ :Union[str, Any] = np.matmul(__snake_case , __snake_case ) snake_case__ :Union[str, Any] = aa[:, None] - 2 * ab + ba[None, :] return d def lowercase_ ( __snake_case : Optional[Any] , __snake_case : int ) -> Any: '''simple docstring''' snake_case__ :Optional[Any] = x.reshape(-1 , 3 ) snake_case__ :List[str] = squared_euclidean_distance(__snake_case , __snake_case ) return np.argmin(__snake_case , axis=1 ) class _snake_case ( _A ): _A = ['pixel_values'] def __init__( self ,UpperCamelCase = None ,UpperCamelCase = True ,UpperCamelCase = None ,UpperCamelCase = PILImageResampling.BILINEAR ,UpperCamelCase = True ,UpperCamelCase = True ,**UpperCamelCase ,) -> None: super().__init__(**UpperCamelCase ) snake_case__ :List[Any] = size if size is not None else {"height": 256, "width": 256} snake_case__ :str = get_size_dict(UpperCamelCase ) snake_case__ :Dict = np.array(UpperCamelCase ) if clusters is not None else None snake_case__ :str = do_resize snake_case__ :List[str] = size snake_case__ :List[Any] = resample snake_case__ :Union[str, Any] = do_normalize snake_case__ :int = do_color_quantize def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = PILImageResampling.BILINEAR ,UpperCamelCase = None ,**UpperCamelCase ,) -> np.ndarray: snake_case__ :List[str] = get_size_dict(UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}' ) return resize( UpperCamelCase ,size=(size["height"], size["width"]) ,resample=UpperCamelCase ,data_format=UpperCamelCase ,**UpperCamelCase ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,) -> np.ndarray: snake_case__ :Tuple = rescale(image=UpperCamelCase ,scale=1 / 127.5 ,data_format=UpperCamelCase ) snake_case__ :List[Any] = image - 1 return image def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = ChannelDimension.FIRST ,**UpperCamelCase ,) -> PIL.Image.Image: snake_case__ :Optional[int] = do_resize if do_resize is not None else self.do_resize snake_case__ :int = size if size is not None else self.size snake_case__ :Tuple = get_size_dict(UpperCamelCase ) snake_case__ :str = resample if resample is not None else self.resample snake_case__ :Dict = do_normalize if do_normalize is not None else self.do_normalize snake_case__ :Tuple = do_color_quantize if do_color_quantize is not None else self.do_color_quantize snake_case__ :List[Any] = clusters if clusters is not None else self.clusters snake_case__ :str = np.array(UpperCamelCase ) snake_case__ :int = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_color_quantize and clusters is None: raise ValueError("Clusters must be specified if do_color_quantize is True." ) # All transformations expect numpy arrays. snake_case__ :Union[str, Any] = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: snake_case__ :int = [self.resize(image=UpperCamelCase ,size=UpperCamelCase ,resample=UpperCamelCase ) for image in images] if do_normalize: snake_case__ :Any = [self.normalize(image=UpperCamelCase ) for image in images] if do_color_quantize: snake_case__ :Optional[Any] = [to_channel_dimension_format(UpperCamelCase ,ChannelDimension.LAST ) for image in images] # color quantize from (batch_size, height, width, 3) to (batch_size, height, width) snake_case__ :Union[str, Any] = np.array(UpperCamelCase ) snake_case__ :Optional[int] = color_quantize(UpperCamelCase ,UpperCamelCase ).reshape(images.shape[:-1] ) # flatten to (batch_size, height*width) snake_case__ :List[Any] = images.shape[0] snake_case__ :str = images.reshape(UpperCamelCase ,-1 ) # We need to convert back to a list of images to keep consistent behaviour across processors. snake_case__ :Any = list(UpperCamelCase ) else: snake_case__ :List[str] = [to_channel_dimension_format(UpperCamelCase ,UpperCamelCase ) for image in images] snake_case__ :List[str] = {"input_ids": images} return BatchFeature(data=UpperCamelCase ,tensor_type=UpperCamelCase )
57
1
import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def lowercase_ ( __snake_case : int , __snake_case : Tuple , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : List[Any]=True , __snake_case : Optional[int]="pt" ) -> Any: '''simple docstring''' snake_case__ :List[Any] = {"add_prefix_space": True} if isinstance(__snake_case , __snake_case ) and not line.startswith(" " ) else {} snake_case__ :Tuple = padding_side return tokenizer( [line] , max_length=__snake_case , padding="max_length" if pad_to_max_length else None , truncation=__snake_case , return_tensors=__snake_case , add_special_tokens=__snake_case , **__snake_case , ) def lowercase_ ( __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : Optional[int]=None , ) -> Any: '''simple docstring''' snake_case__ :Dict = input_ids.ne(__snake_case ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class _snake_case ( _A ): def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase="train" ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase="" ,) -> str: super().__init__() snake_case__ :Dict = Path(UpperCamelCase ).joinpath(type_path + ".source" ) snake_case__ :Union[str, Any] = Path(UpperCamelCase ).joinpath(type_path + ".target" ) snake_case__ :int = self.get_char_lens(self.src_file ) snake_case__ :str = max_source_length snake_case__ :int = max_target_length assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}' snake_case__ :Union[str, Any] = tokenizer snake_case__ :Optional[Any] = prefix if n_obs is not None: snake_case__ :Tuple = self.src_lens[:n_obs] snake_case__ :Dict = src_lang snake_case__ :str = tgt_lang def __len__( self ) -> int: return len(self.src_lens ) def __getitem__( self ,UpperCamelCase ) -> Dict[str, torch.Tensor]: snake_case__ :int = index + 1 # linecache starts at 1 snake_case__ :Tuple = self.prefix + linecache.getline(str(self.src_file ) ,UpperCamelCase ).rstrip("\n" ) snake_case__ :Any = linecache.getline(str(self.tgt_file ) ,UpperCamelCase ).rstrip("\n" ) assert source_line, f'empty source line for index {index}' assert tgt_line, f'empty tgt line for index {index}' # Need to add eos token manually for T5 if isinstance(self.tokenizer ,UpperCamelCase ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right snake_case__ :Dict = ( self.tokenizer.question_encoder if isinstance(self.tokenizer ,UpperCamelCase ) else self.tokenizer ) snake_case__ :Optional[Any] = self.tokenizer.generator if isinstance(self.tokenizer ,UpperCamelCase ) else self.tokenizer snake_case__ :List[Any] = encode_line(UpperCamelCase ,UpperCamelCase ,self.max_source_length ,"right" ) snake_case__ :str = encode_line(UpperCamelCase ,UpperCamelCase ,self.max_target_length ,"right" ) snake_case__ :List[str] = source_inputs["input_ids"].squeeze() snake_case__ :Optional[Any] = target_inputs["input_ids"].squeeze() snake_case__ :List[str] = source_inputs["attention_mask"].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def lowerCAmelCase_ ( UpperCamelCase ) -> int: return [len(UpperCamelCase ) for x in Path(UpperCamelCase ).open().readlines()] def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Dict[str, torch.Tensor]: snake_case__ :Union[str, Any] = torch.stack([x["input_ids"] for x in batch] ) snake_case__ :Optional[Any] = torch.stack([x["attention_mask"] for x in batch] ) snake_case__ :Tuple = torch.stack([x["decoder_input_ids"] for x in batch] ) snake_case__ :Dict = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer ,UpperCamelCase ) else self.tokenizer.pad_token_id ) snake_case__ :Union[str, Any] = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer ,UpperCamelCase ) else self.tokenizer.pad_token_id ) snake_case__ :Optional[Any] = trim_batch(UpperCamelCase ,UpperCamelCase ) snake_case__ , snake_case__ :Optional[Any] = trim_batch(UpperCamelCase ,UpperCamelCase ,attention_mask=UpperCamelCase ) snake_case__ :int = { "input_ids": source_ids, "attention_mask": source_mask, "decoder_input_ids": y, } return batch __UpperCAmelCase : Union[str, Any] = getLogger(__name__) def lowercase_ ( __snake_case : List[List] ) -> Dict: '''simple docstring''' return list(itertools.chain.from_iterable(__snake_case ) ) def lowercase_ ( __snake_case : str ) -> None: '''simple docstring''' snake_case__ :Optional[Any] = get_git_info() save_json(__snake_case , os.path.join(__snake_case , "git_log.json" ) ) def lowercase_ ( __snake_case : int , __snake_case : Tuple , __snake_case : Tuple=4 , **__snake_case : Optional[Any] ) -> Tuple: '''simple docstring''' with open(__snake_case , "w" ) as f: json.dump(__snake_case , __snake_case , indent=__snake_case , **__snake_case ) def lowercase_ ( __snake_case : Dict ) -> Any: '''simple docstring''' with open(__snake_case ) as f: return json.load(__snake_case ) def lowercase_ ( ) -> Optional[Any]: '''simple docstring''' snake_case__ :Tuple = git.Repo(search_parent_directories=__snake_case ) snake_case__ :Any = { "repo_id": str(__snake_case ), "repo_sha": str(repo.head.object.hexsha ), "repo_branch": str(repo.active_branch ), "hostname": str(socket.gethostname() ), } return repo_infos def lowercase_ ( __snake_case : Callable , __snake_case : Iterable ) -> List: '''simple docstring''' return list(map(__snake_case , __snake_case ) ) def lowercase_ ( __snake_case : Optional[int] , __snake_case : List[Any] ) -> Tuple: '''simple docstring''' with open(__snake_case , "wb" ) as f: return pickle.dump(__snake_case , __snake_case ) def lowercase_ ( __snake_case : str ) -> List[Any]: '''simple docstring''' def remove_articles(__snake_case : Tuple ): return re.sub(R"\b(a|an|the)\b" , " " , __snake_case ) def white_space_fix(__snake_case : str ): return " ".join(text.split() ) def remove_punc(__snake_case : Optional[Any] ): snake_case__ :str = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(__snake_case : Optional[int] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(__snake_case ) ) ) ) def lowercase_ ( __snake_case : Optional[Any] , __snake_case : Optional[int] ) -> int: '''simple docstring''' snake_case__ :Union[str, Any] = normalize_answer(__snake_case ).split() snake_case__ :Any = normalize_answer(__snake_case ).split() snake_case__ :Optional[Any] = Counter(__snake_case ) & Counter(__snake_case ) snake_case__ :Union[str, Any] = sum(common.values() ) if num_same == 0: return 0 snake_case__ :Tuple = 1.0 * num_same / len(__snake_case ) snake_case__ :Any = 1.0 * num_same / len(__snake_case ) snake_case__ :Any = (2 * precision * recall) / (precision + recall) return fa def lowercase_ ( __snake_case : Optional[Any] , __snake_case : Optional[Any] ) -> Tuple: '''simple docstring''' return normalize_answer(__snake_case ) == normalize_answer(__snake_case ) def lowercase_ ( __snake_case : List[str] , __snake_case : List[str] ) -> Dict: '''simple docstring''' assert len(__snake_case ) == len(__snake_case ) snake_case__ :Union[str, Any] = 0 for hypo, pred in zip(__snake_case , __snake_case ): em += exact_match_score(__snake_case , __snake_case ) if len(__snake_case ) > 0: em /= len(__snake_case ) return {"em": em} def lowercase_ ( __snake_case : int ) -> Dict: '''simple docstring''' return model_prefix.startswith("rag" ) def lowercase_ ( __snake_case : Dict , __snake_case : Optional[int] , __snake_case : Dict ) -> List[Any]: '''simple docstring''' snake_case__ :Optional[int] = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead snake_case__ :Tuple = "dropout_rate" for p in extra_params: if getattr(__snake_case , __snake_case , __snake_case ): if not hasattr(__snake_case , __snake_case ) and not hasattr(__snake_case , equivalent_param[p] ): logger.info("config doesn't have a `{}` attribute".format(__snake_case ) ) delattr(__snake_case , __snake_case ) continue snake_case__ :int = p if hasattr(__snake_case , __snake_case ) else equivalent_param[p] setattr(__snake_case , __snake_case , getattr(__snake_case , __snake_case ) ) delattr(__snake_case , __snake_case ) return hparams, config
57
import pytest __UpperCAmelCase : int = "__dummy_dataset1__" __UpperCAmelCase : int = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n" @pytest.fixture def lowercase_ ( ) -> Optional[Any]: '''simple docstring''' return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def lowercase_ ( ) -> Optional[int]: '''simple docstring''' return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def lowercase_ ( __snake_case : Optional[int] , __snake_case : List[Any] , __snake_case : Any ) -> Dict: '''simple docstring''' snake_case__ :Optional[Any] = dataset_loading_script_name snake_case__ :Optional[Any] = tmp_path / "datasets" / script_name script_dir.mkdir(parents=__snake_case ) snake_case__ :List[Any] = script_dir / F'{script_name}.py' with open(__snake_case , "w" ) as f: f.write(__snake_case ) return str(__snake_case )
57
1
import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py __UpperCAmelCase : Optional[int] = "src/transformers" __UpperCAmelCase : Tuple = "docs/source/en/tasks" def lowercase_ ( __snake_case : str , __snake_case : Union[str, Any] , __snake_case : int ) -> str: '''simple docstring''' with open(__snake_case , "r" , encoding="utf-8" , newline="\n" ) as f: snake_case__ :int = f.readlines() # Find the start prompt. snake_case__ :Optional[Any] = 0 while not lines[start_index].startswith(__snake_case ): start_index += 1 start_index += 1 snake_case__ :Optional[int] = start_index while not lines[end_index].startswith(__snake_case ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. __UpperCAmelCase : List[str] = direct_transformers_import(TRANSFORMERS_PATH) __UpperCAmelCase : Dict = { "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). __UpperCAmelCase : List[Any] = { "summarization.md": ("nllb",), "translation.md": ("nllb",), } def lowercase_ ( __snake_case : Tuple ) -> Optional[Any]: '''simple docstring''' snake_case__ :Union[str, Any] = TASK_GUIDE_TO_MODELS[task_guide] snake_case__ :Union[str, Any] = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(__snake_case , set() ) snake_case__ :str = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([F'[{name}](../model_doc/{code})' for code, name in model_names.items()] ) + "\n" def lowercase_ ( __snake_case : int , __snake_case : List[Any]=False ) -> Dict: '''simple docstring''' snake_case__ , snake_case__ , snake_case__ , snake_case__ :Dict = _find_text_in_file( filename=os.path.join(__snake_case , __snake_case ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , ) snake_case__ :Union[str, Any] = get_model_list_for_task(__snake_case ) if current_list != new_list: if overwrite: with open(os.path.join(__snake_case , __snake_case ) , "w" , encoding="utf-8" , newline="\n" ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( F'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`' " to fix this." ) if __name__ == "__main__": __UpperCAmelCase : Dict = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") __UpperCAmelCase : Tuple = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
57
from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
57
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available __UpperCAmelCase : Tuple = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : List[Any] = ["BartphoTokenizer"] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys __UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
57
import json import os import shutil import warnings from argparse import ArgumentParser, Namespace from pathlib import Path from typing import List from ..utils import logging from . import BaseTransformersCLICommand try: from cookiecutter.main import cookiecutter __UpperCAmelCase : Dict = True except ImportError: __UpperCAmelCase : List[Any] = False __UpperCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name def lowercase_ ( __snake_case : Namespace ) -> Dict: '''simple docstring''' return AddNewModelCommand(args.testing , args.testing_file , path=args.path ) class _snake_case ( _A ): @staticmethod def lowerCAmelCase_ ( UpperCamelCase ) -> Any: snake_case__ :Dict = parser.add_parser("add-new-model" ) add_new_model_parser.add_argument("--testing" ,action="store_true" ,help="If in testing mode." ) add_new_model_parser.add_argument("--testing_file" ,type=UpperCamelCase ,help="Configuration file on which to run." ) add_new_model_parser.add_argument( "--path" ,type=UpperCamelCase ,help="Path to cookiecutter. Should only be used for testing purposes." ) add_new_model_parser.set_defaults(func=UpperCamelCase ) def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ,*UpperCamelCase ) -> Any: snake_case__ :Union[str, Any] = testing snake_case__ :Union[str, Any] = testing_file snake_case__ :List[str] = path def lowerCAmelCase_ ( self ) -> List[Any]: warnings.warn( "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. " "It is not actively maintained anymore, so might give a result that won't pass all tests and quality " "checks, you should use `transformers-cli add-new-model-like` instead." ) if not _has_cookiecutter: raise ImportError( "Model creation dependencies are required to use the `add_new_model` command. Install them by running " "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" ) # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory snake_case__ :Tuple = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]] if len(UpperCamelCase ) > 0: raise ValueError( "Several directories starting with `cookiecutter-template-` in current working directory. " "Please clean your directory by removing all folders starting with `cookiecutter-template-` or " "change your working directory." ) snake_case__ :str = ( Path(UpperCamelCase ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent ) snake_case__ :Tuple = path_to_transformer_root / "templates" / "adding_a_new_model" # Execute cookiecutter if not self._testing: cookiecutter(str(UpperCamelCase ) ) else: with open(self._testing_file ,"r" ) as configuration_file: snake_case__ :str = json.load(UpperCamelCase ) cookiecutter( str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=UpperCamelCase ,extra_context=UpperCamelCase ,) snake_case__ :List[Any] = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0] # Retrieve configuration with open(directory + "/configuration.json" ,"r" ) as configuration_file: snake_case__ :Dict = json.load(UpperCamelCase ) snake_case__ :Optional[Any] = configuration["lowercase_modelname"] snake_case__ :List[Any] = configuration["generate_tensorflow_pytorch_and_flax"] os.remove(f'{directory}/configuration.json' ) snake_case__ :Any = "PyTorch" in generate_tensorflow_pytorch_and_flax snake_case__ :Any = "TensorFlow" in generate_tensorflow_pytorch_and_flax snake_case__ :Any = "Flax" in generate_tensorflow_pytorch_and_flax snake_case__ :Dict = f'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}' os.makedirs(UpperCamelCase ,exist_ok=UpperCamelCase ) os.makedirs(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=UpperCamelCase ) # Tests require submodules as they have parent imports with open(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,"w" ): pass shutil.move( f'{directory}/__init__.py' ,f'{model_dir}/__init__.py' ,) shutil.move( f'{directory}/configuration_{lowercase_model_name}.py' ,f'{model_dir}/configuration_{lowercase_model_name}.py' ,) def remove_copy_lines(UpperCamelCase ): with open(UpperCamelCase ,"r" ) as f: snake_case__ :List[str] = f.readlines() with open(UpperCamelCase ,"w" ) as f: for line in lines: if "# Copied from transformers." not in line: f.write(UpperCamelCase ) if output_pytorch: if not self._testing: remove_copy_lines(f'{directory}/modeling_{lowercase_model_name}.py' ) shutil.move( f'{directory}/modeling_{lowercase_model_name}.py' ,f'{model_dir}/modeling_{lowercase_model_name}.py' ,) shutil.move( f'{directory}/test_modeling_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,) else: os.remove(f'{directory}/modeling_{lowercase_model_name}.py' ) os.remove(f'{directory}/test_modeling_{lowercase_model_name}.py' ) if output_tensorflow: if not self._testing: remove_copy_lines(f'{directory}/modeling_tf_{lowercase_model_name}.py' ) shutil.move( f'{directory}/modeling_tf_{lowercase_model_name}.py' ,f'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,) shutil.move( f'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,) else: os.remove(f'{directory}/modeling_tf_{lowercase_model_name}.py' ) os.remove(f'{directory}/test_modeling_tf_{lowercase_model_name}.py' ) if output_flax: if not self._testing: remove_copy_lines(f'{directory}/modeling_flax_{lowercase_model_name}.py' ) shutil.move( f'{directory}/modeling_flax_{lowercase_model_name}.py' ,f'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,) shutil.move( f'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,) else: os.remove(f'{directory}/modeling_flax_{lowercase_model_name}.py' ) os.remove(f'{directory}/test_modeling_flax_{lowercase_model_name}.py' ) shutil.move( f'{directory}/{lowercase_model_name}.md' ,f'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,) shutil.move( f'{directory}/tokenization_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}.py' ,) shutil.move( f'{directory}/tokenization_fast_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,) from os import fdopen, remove from shutil import copymode, move from tempfile import mkstemp def replace(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ): # Create temp file snake_case__ , snake_case__ :Optional[Any] = mkstemp() snake_case__ :Optional[Any] = False with fdopen(UpperCamelCase ,"w" ) as new_file: with open(UpperCamelCase ) as old_file: for line in old_file: new_file.write(UpperCamelCase ) if line_to_copy_below in line: snake_case__ :Optional[Any] = True for line_to_copy in lines_to_copy: new_file.write(UpperCamelCase ) if not line_found: raise ValueError(f'Line {line_to_copy_below} was not found in file.' ) # Copy the file permissions from the old file to the new file copymode(UpperCamelCase ,UpperCamelCase ) # Remove original file remove(UpperCamelCase ) # Move new file move(UpperCamelCase ,UpperCamelCase ) def skip_units(UpperCamelCase ): return ( ("generating PyTorch" in line and not output_pytorch) or ("generating TensorFlow" in line and not output_tensorflow) or ("generating Flax" in line and not output_flax) ) def replace_in_files(UpperCamelCase ): with open(UpperCamelCase ) as datafile: snake_case__ :int = [] snake_case__ :Optional[int] = False snake_case__ :List[str] = False for line in datafile: if "# To replace in: " in line and "##" not in line: snake_case__ :Optional[Any] = line.split("\"" )[1] snake_case__ :Tuple = skip_units(UpperCamelCase ) elif "# Below: " in line and "##" not in line: snake_case__ :Optional[Any] = line.split("\"" )[1] snake_case__ :List[str] = skip_units(UpperCamelCase ) elif "# End." in line and "##" not in line: if not skip_file and not skip_snippet: replace(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) snake_case__ :Tuple = [] elif "# Replace with" in line and "##" not in line: snake_case__ :Optional[Any] = [] elif "##" not in line: lines_to_copy.append(UpperCamelCase ) remove(UpperCamelCase ) replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py' ) os.rmdir(UpperCamelCase )
57
1
import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration __UpperCAmelCase : Any = pytest.mark.integration __UpperCAmelCase : List[str] = {"comet"} __UpperCAmelCase : List[str] = importlib.util.find_spec("fairseq") is not None __UpperCAmelCase : Optional[int] = {"code_eval"} __UpperCAmelCase : Optional[Any] = os.name == "nt" __UpperCAmelCase : Tuple = {"bertscore", "frugalscore", "perplexity"} __UpperCAmelCase : str = importlib.util.find_spec("transformers") is not None def lowercase_ ( __snake_case : str ) -> str: '''simple docstring''' @wraps(__snake_case ) def wrapper(self : Union[str, Any] , __snake_case : Dict ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest("\"test requires Fairseq\"" ) else: test_case(self , __snake_case ) return wrapper def lowercase_ ( __snake_case : str ) -> Any: '''simple docstring''' @wraps(__snake_case ) def wrapper(self : Any , __snake_case : Any ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest("\"test requires transformers\"" ) else: test_case(self , __snake_case ) return wrapper def lowercase_ ( __snake_case : Optional[int] ) -> List[Any]: '''simple docstring''' @wraps(__snake_case ) def wrapper(self : Optional[int] , __snake_case : Tuple ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest("\"test not supported on Windows\"" ) else: test_case(self , __snake_case ) return wrapper def lowercase_ ( ) -> Dict: '''simple docstring''' snake_case__ :List[str] = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( _A , _A , _A ) @local class _snake_case ( parameterized.TestCase ): _A = {} _A = None @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" ) @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" ) def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Optional[Any]: snake_case__ :List[Any] = "[...]" snake_case__ :List[Any] = importlib.import_module( datasets.load.metric_module_factory(os.path.join("metrics" ,UpperCamelCase ) ).module_path ) snake_case__ :Union[str, Any] = datasets.load.import_main_class(metric_module.__name__ ,dataset=UpperCamelCase ) # check parameters snake_case__ :Optional[int] = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(UpperCamelCase ,metric_module.__name__ ): with self.use_local_metrics(): try: snake_case__ :Tuple = doctest.testmod(UpperCamelCase ,verbose=UpperCamelCase ,raise_on_error=UpperCamelCase ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed ,0 ) self.assertGreater(results.attempted ,1 ) @slow def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Dict: snake_case__ :List[Any] = "[...]" snake_case__ :Optional[Any] = importlib.import_module( datasets.load.metric_module_factory(os.path.join("metrics" ,UpperCamelCase ) ).module_path ) # run doctest with self.use_local_metrics(): snake_case__ :str = doctest.testmod(UpperCamelCase ,verbose=UpperCamelCase ,raise_on_error=UpperCamelCase ) self.assertEqual(results.failed ,0 ) self.assertGreater(results.attempted ,1 ) @contextmanager def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> Optional[int]: if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](UpperCamelCase ): yield else: yield @contextmanager def lowerCAmelCase_ ( self ) -> Dict: def load_local_metric(UpperCamelCase ,*UpperCamelCase ,**UpperCamelCase ): return load_metric(os.path.join("metrics" ,UpperCamelCase ) ,*UpperCamelCase ,**UpperCamelCase ) with patch("datasets.load_metric" ) as mock_load_metric: snake_case__ :Any = load_local_metric yield @classmethod def lowerCAmelCase_ ( cls ,UpperCamelCase ) -> Any: def wrapper(UpperCamelCase ): snake_case__ :Optional[Any] = contextmanager(UpperCamelCase ) snake_case__ :Optional[Any] = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher("bleurt" ) def lowercase_ ( __snake_case : List[Any] ) -> int: '''simple docstring''' import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string("sv" , "" , "" ) # handle pytest cli flags class _snake_case ( _A ): def lowerCAmelCase_ ( self ,UpperCamelCase ) -> List[str]: assert len(input_dict["input_ids"] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch("bleurt.score._create_predictor" ) as mock_create_predictor: snake_case__ :List[Any] = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher("bertscore" ) def lowercase_ ( __snake_case : Dict ) -> Dict: '''simple docstring''' import torch def bert_cos_score_idf(__snake_case : List[Any] , __snake_case : Tuple , *__snake_case : Optional[int] , **__snake_case : List[Any] ): return torch.tensor([[1.0, 1.0, 1.0]] * len(__snake_case ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch("bert_score.scorer.get_model" ), patch( "bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf: snake_case__ :Any = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher("comet" ) def lowercase_ ( __snake_case : Union[str, Any] ) -> Optional[int]: '''simple docstring''' def load_from_checkpoint(__snake_case : List[Any] ): class _snake_case : def lowerCAmelCase_ ( self ,UpperCamelCase ,*UpperCamelCase ,**UpperCamelCase ) -> Dict: assert len(UpperCamelCase ) == 2 snake_case__ :int = [0.19, 0.92] return scores, sum(UpperCamelCase ) / len(UpperCamelCase ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch("comet.download_model" ) as mock_download_model: snake_case__ :Union[str, Any] = None with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint: snake_case__ :Union[str, Any] = load_from_checkpoint yield def lowercase_ ( ) -> List[str]: '''simple docstring''' snake_case__ :Optional[Any] = load_metric(os.path.join("metrics" , "seqeval" ) ) snake_case__ :List[str] = "ERROR" snake_case__ :Dict = F'Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}' with pytest.raises(__snake_case , match=re.escape(__snake_case ) ): metric.compute(predictions=[] , references=[] , scheme=__snake_case )
57
from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_herbert import HerbertTokenizer __UpperCAmelCase : str = logging.get_logger(__name__) __UpperCAmelCase : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} __UpperCAmelCase : List[Any] = { "vocab_file": { "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json" }, "merges_file": { "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt" }, } __UpperCAmelCase : str = {"allegro/herbert-base-cased": 5_1_4} __UpperCAmelCase : List[str] = {} class _snake_case ( _A ): _A = VOCAB_FILES_NAMES _A = PRETRAINED_VOCAB_FILES_MAP _A = PRETRAINED_INIT_CONFIGURATION _A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A = HerbertTokenizer def __init__( self ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase="<s>" ,UpperCamelCase="<unk>" ,UpperCamelCase="<pad>" ,UpperCamelCase="<mask>" ,UpperCamelCase="</s>" ,**UpperCamelCase ,) -> Dict: super().__init__( UpperCamelCase ,UpperCamelCase ,tokenizer_file=UpperCamelCase ,cls_token=UpperCamelCase ,unk_token=UpperCamelCase ,pad_token=UpperCamelCase ,mask_token=UpperCamelCase ,sep_token=UpperCamelCase ,**UpperCamelCase ,) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> List[int]: snake_case__ :Optional[int] = [self.cls_token_id] snake_case__ :Any = [self.sep_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase ,token_ids_a=UpperCamelCase ,already_has_special_tokens=UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase )) + [1] return [1] + ([0] * len(UpperCamelCase )) + [1] + ([0] * len(UpperCamelCase )) + [1] def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> List[int]: snake_case__ :Any = [self.sep_token_id] snake_case__ :Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> Tuple[str]: snake_case__ :List[str] = self._tokenizer.model.save(UpperCamelCase ,name=UpperCamelCase ) return tuple(UpperCamelCase )
57
1
from statistics import mean import numpy as np def lowercase_ ( __snake_case : list , __snake_case : list , __snake_case : list , __snake_case : int ) -> list: '''simple docstring''' snake_case__ :str = 0 # Number of processes finished snake_case__ :List[Any] = 0 # Displays the finished process. # If it is 0, the performance is completed if it is 1, before the performance. snake_case__ :str = [0] * no_of_process # List to include calculation results snake_case__ :Any = [0] * no_of_process # Sort by arrival time. snake_case__ :Tuple = [burst_time[i] for i in np.argsort(__snake_case )] snake_case__ :Optional[Any] = [process_name[i] for i in np.argsort(__snake_case )] arrival_time.sort() while no_of_process > finished_process_count: snake_case__ :str = 0 while finished_process[i] == 1: i += 1 if current_time < arrival_time[i]: snake_case__ :Optional[int] = arrival_time[i] snake_case__ :Dict = 0 # Index showing the location of the process being performed snake_case__ :Optional[int] = 0 # Saves the current response ratio. snake_case__ :List[str] = 0 for i in range(0 , __snake_case ): if finished_process[i] == 0 and arrival_time[i] <= current_time: snake_case__ :int = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[ i ] if response_ratio < temp: snake_case__ :Dict = temp snake_case__ :List[Any] = i # Calculate the turn around time snake_case__ :List[Any] = current_time + burst_time[loc] - arrival_time[loc] current_time += burst_time[loc] # Indicates that the process has been performed. snake_case__ :List[Any] = 1 # Increase finished_process_count by 1 finished_process_count += 1 return turn_around_time def lowercase_ ( __snake_case : list , __snake_case : list , __snake_case : list , __snake_case : int ) -> list: '''simple docstring''' snake_case__ :Any = [0] * no_of_process for i in range(0 , __snake_case ): snake_case__ :Optional[Any] = turn_around_time[i] - burst_time[i] return waiting_time if __name__ == "__main__": __UpperCAmelCase : Optional[Any] = 5 __UpperCAmelCase : Any = ["A", "B", "C", "D", "E"] __UpperCAmelCase : str = [1, 2, 3, 4, 5] __UpperCAmelCase : Any = [1, 2, 3, 4, 5] __UpperCAmelCase : str = calculate_turn_around_time( process_name, arrival_time, burst_time, no_of_process ) __UpperCAmelCase : Tuple = calculate_waiting_time( process_name, turn_around_time, burst_time, no_of_process ) print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time") for i in range(0, no_of_process): print( F'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t''' F'''{turn_around_time[i]}\t\t\t{waiting_time[i]}''' ) print(F'''average waiting time : {mean(waiting_time):.5f}''') print(F'''average turn around time : {mean(turn_around_time):.5f}''')
57
def lowercase_ ( __snake_case : int ) -> bool: '''simple docstring''' if p < 2: raise ValueError("p should not be less than 2!" ) elif p == 2: return True snake_case__ :List[str] = 4 snake_case__ :Optional[int] = (1 << p) - 1 for _ in range(p - 2 ): snake_case__ :List[Any] = ((s * s) - 2) % m return s == 0 if __name__ == "__main__": print(lucas_lehmer_test(7)) print(lucas_lehmer_test(1_1))
57
1
from abc import ABC, abstractmethod from typing import List, Optional class _snake_case ( _A ): def __init__( self ) -> str: # test for the above condition self.test() def lowerCAmelCase_ ( self ) -> List[str]: snake_case__ :List[str] = 0 snake_case__ :str = False while not completed: if counter == 1: self.reset() snake_case__ :Optional[int] = self.advance() if not self.does_advance(UpperCamelCase ): raise Exception( "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true." ) snake_case__ , snake_case__ , snake_case__ :Union[str, Any] = self.update(UpperCamelCase ) counter += 1 if counter > 10_000: raise Exception("update() does not fulfill the constraint." ) if self.remaining() != 0: raise Exception("Custom Constraint is not defined correctly." ) @abstractmethod def lowerCAmelCase_ ( self ) -> Dict: raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Optional[int]: raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Union[str, Any]: raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def lowerCAmelCase_ ( self ) -> Tuple: raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def lowerCAmelCase_ ( self ) -> str: raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def lowerCAmelCase_ ( self ,UpperCamelCase=False ) -> Any: raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) class _snake_case ( _A ): def __init__( self ,UpperCamelCase ) -> str: super(UpperCamelCase ,self ).__init__() if not isinstance(UpperCamelCase ,UpperCamelCase ) or len(UpperCamelCase ) == 0: raise ValueError(f'`token_ids` has to be a non-empty list, but is {token_ids}.' ) if any((not isinstance(UpperCamelCase ,UpperCamelCase ) or token_id < 0) for token_id in token_ids ): raise ValueError(f'Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.' ) snake_case__ :List[Any] = token_ids snake_case__ :List[str] = len(self.token_ids ) snake_case__ :Optional[Any] = -1 # the index of the currently fulfilled step snake_case__ :Dict = False def lowerCAmelCase_ ( self ) -> List[str]: if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Optional[int]: if not isinstance(UpperCamelCase ,UpperCamelCase ): raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase )}' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def lowerCAmelCase_ ( self ,UpperCamelCase ) -> List[str]: if not isinstance(UpperCamelCase ,UpperCamelCase ): raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase )}' ) snake_case__ :Tuple = False snake_case__ :str = False snake_case__ :Tuple = False if self.does_advance(UpperCamelCase ): self.fulfilled_idx += 1 snake_case__ :int = True if self.fulfilled_idx == (self.seqlen - 1): snake_case__ :List[Any] = True snake_case__ :Tuple = completed else: # failed to make progress. snake_case__ :List[Any] = True self.reset() return stepped, completed, reset def lowerCAmelCase_ ( self ) -> Union[str, Any]: snake_case__ :Union[str, Any] = False snake_case__ :Union[str, Any] = 0 def lowerCAmelCase_ ( self ) -> List[Any]: return self.seqlen - (self.fulfilled_idx + 1) def lowerCAmelCase_ ( self ,UpperCamelCase=False ) -> Optional[Any]: snake_case__ :Tuple = PhrasalConstraint(self.token_ids ) if stateful: snake_case__ :str = self.seqlen snake_case__ :Optional[Any] = self.fulfilled_idx snake_case__ :List[str] = self.completed return new_constraint class _snake_case : def __init__( self ,UpperCamelCase ,UpperCamelCase=True ) -> Tuple: snake_case__ :Optional[int] = max([len(UpperCamelCase ) for one in nested_token_ids] ) snake_case__ :Union[str, Any] = {} for token_ids in nested_token_ids: snake_case__ :Tuple = root for tidx, token_id in enumerate(UpperCamelCase ): if token_id not in level: snake_case__ :Tuple = {} snake_case__ :List[str] = level[token_id] if no_subsets and self.has_subsets(UpperCamelCase ,UpperCamelCase ): raise ValueError( "Each list in `nested_token_ids` can't be a complete subset of another list, but is" f' {nested_token_ids}.' ) snake_case__ :List[Any] = root def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Union[str, Any]: snake_case__ :str = self.trie for current_token in current_seq: snake_case__ :Any = start[current_token] snake_case__ :Union[str, Any] = list(start.keys() ) return next_tokens def lowerCAmelCase_ ( self ,UpperCamelCase ) -> List[Any]: snake_case__ :str = self.next_tokens(UpperCamelCase ) return len(UpperCamelCase ) == 0 def lowerCAmelCase_ ( self ,UpperCamelCase ) -> str: snake_case__ :int = list(root.values() ) if len(UpperCamelCase ) == 0: return 1 else: return sum([self.count_leaves(UpperCamelCase ) for nn in next_nodes] ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> Dict: snake_case__ :Any = self.count_leaves(UpperCamelCase ) return len(UpperCamelCase ) != leaf_count class _snake_case ( _A ): def __init__( self ,UpperCamelCase ) -> int: super(UpperCamelCase ,self ).__init__() if not isinstance(UpperCamelCase ,UpperCamelCase ) or len(UpperCamelCase ) == 0: raise ValueError(f'`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.' ) if any(not isinstance(UpperCamelCase ,UpperCamelCase ) for token_ids in nested_token_ids ): raise ValueError(f'`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.' ) if any( any((not isinstance(UpperCamelCase ,UpperCamelCase ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( f'Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.' ) snake_case__ :Dict = DisjunctiveTrie(UpperCamelCase ) snake_case__ :List[Any] = nested_token_ids snake_case__ :Tuple = self.trie.max_height snake_case__ :Tuple = [] snake_case__ :str = False def lowerCAmelCase_ ( self ) -> Dict: snake_case__ :List[Any] = self.trie.next_tokens(self.current_seq ) if len(UpperCamelCase ) == 0: return None else: return token_list def lowerCAmelCase_ ( self ,UpperCamelCase ) -> int: if not isinstance(UpperCamelCase ,UpperCamelCase ): raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase )}' ) snake_case__ :Optional[Any] = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def lowerCAmelCase_ ( self ,UpperCamelCase ) -> str: if not isinstance(UpperCamelCase ,UpperCamelCase ): raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase )}' ) snake_case__ :Optional[int] = False snake_case__ :Tuple = False snake_case__ :Dict = False if self.does_advance(UpperCamelCase ): self.current_seq.append(UpperCamelCase ) snake_case__ :Optional[Any] = True else: snake_case__ :Optional[int] = True self.reset() snake_case__ :Dict = self.trie.reached_leaf(self.current_seq ) snake_case__ :Any = completed return stepped, completed, reset def lowerCAmelCase_ ( self ) -> Optional[Any]: snake_case__ :Optional[Any] = False snake_case__ :Optional[int] = [] def lowerCAmelCase_ ( self ) -> List[str]: if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def lowerCAmelCase_ ( self ,UpperCamelCase=False ) -> Optional[Any]: snake_case__ :Tuple = DisjunctiveConstraint(self.token_ids ) if stateful: snake_case__ :str = self.seqlen snake_case__ :Tuple = self.current_seq snake_case__ :Optional[Any] = self.completed return new_constraint class _snake_case : def __init__( self ,UpperCamelCase ) -> Union[str, Any]: snake_case__ :Dict = constraints # max # of steps required to fulfill a given constraint snake_case__ :Any = max([c.seqlen for c in constraints] ) snake_case__ :Dict = len(UpperCamelCase ) snake_case__ :int = False self.init_state() def lowerCAmelCase_ ( self ) -> int: snake_case__ :Optional[int] = [] snake_case__ :Tuple = None snake_case__ :Any = [constraint.copy(stateful=UpperCamelCase ) for constraint in self.constraints] def lowerCAmelCase_ ( self ) -> str: snake_case__ :List[str] = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def lowerCAmelCase_ ( self ) -> List[str]: snake_case__ :Any = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" snake_case__ :int = constraint.advance() if isinstance(UpperCamelCase ,UpperCamelCase ): token_list.append(UpperCamelCase ) elif isinstance(UpperCamelCase ,UpperCamelCase ): token_list.extend(UpperCamelCase ) else: snake_case__ :List[Any] = self.inprogress_constraint.advance() if isinstance(UpperCamelCase ,UpperCamelCase ): token_list.append(UpperCamelCase ) elif isinstance(UpperCamelCase ,UpperCamelCase ): token_list.extend(UpperCamelCase ) if len(UpperCamelCase ) == 0: return None else: return token_list def lowerCAmelCase_ ( self ,UpperCamelCase ) -> List[str]: self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint snake_case__ , snake_case__ :Optional[Any] = self.add(UpperCamelCase ) # the entire list of constraints are fulfilled if self.completed: break def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Any: if not isinstance(UpperCamelCase ,UpperCamelCase ): raise ValueError(f'`token_id` should be an `int`, but is `{token_id}`.' ) snake_case__ , snake_case__ :Optional[Any] = False, False if self.completed: snake_case__ :List[Any] = True snake_case__ :str = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state snake_case__ , snake_case__ , snake_case__ :Dict = self.inprogress_constraint.update(UpperCamelCase ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=UpperCamelCase ) ) snake_case__ :Any = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) snake_case__ :str = None if len(self.pending_constraints ) == 0: # we're done! snake_case__ :int = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(UpperCamelCase ): snake_case__ , snake_case__ , snake_case__ :Union[str, Any] = pending_constraint.update(UpperCamelCase ) if not stepped: raise Exception( "`constraint.update(token_id)` is not yielding incremental progress, " "even though `constraint.does_advance(token_id)` is true." ) if complete: self.complete_constraints.append(UpperCamelCase ) snake_case__ :Union[str, Any] = None if not complete and stepped: snake_case__ :Union[str, Any] = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". snake_case__ :List[str] = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. snake_case__ :int = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def lowerCAmelCase_ ( self ,UpperCamelCase=True ) -> Optional[int]: snake_case__ :List[str] = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: snake_case__ :Optional[Any] = [ constraint.copy(stateful=UpperCamelCase ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: snake_case__ :List[Any] = self.inprogress_constraint.copy(stateful=UpperCamelCase ) snake_case__ :str = [constraint.copy() for constraint in self.pending_constraints] return new_state
57
from typing import Any def lowercase_ ( __snake_case : list , __snake_case : list , __snake_case : dict , __snake_case : dict , __snake_case : dict , ) -> list: '''simple docstring''' _validation( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) # Creates data structures and fill initial step snake_case__ :dict = {} snake_case__ :dict = {} for state in states_space: snake_case__ :List[Any] = observations_space[0] snake_case__ :str = ( initial_probabilities[state] * emission_probabilities[state][observation] ) snake_case__ :str = None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 , len(__snake_case ) ): snake_case__ :Any = observations_space[o] snake_case__ :Tuple = observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function snake_case__ :Tuple = "" snake_case__ :Union[str, Any] = -1 for k_state in states_space: snake_case__ :int = ( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: snake_case__ :str = probability snake_case__ :Tuple = k_state # Update probabilities and pointers dicts snake_case__ :List[str] = ( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) snake_case__ :List[str] = arg_max # The final observation snake_case__ :str = observations_space[len(__snake_case ) - 1] # argmax for given final observation snake_case__ :Optional[int] = "" snake_case__ :List[str] = -1 for k_state in states_space: snake_case__ :List[str] = probabilities[(k_state, final_observation)] if probability > max_probability: snake_case__ :List[str] = probability snake_case__ :int = k_state snake_case__ :Any = arg_max # Process pointers backwards snake_case__ :int = last_state snake_case__ :List[str] = [] for o in range(len(__snake_case ) - 1 , -1 , -1 ): result.append(__snake_case ) snake_case__ :List[str] = pointers[previous, observations_space[o]] result.reverse() return result def lowercase_ ( __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , ) -> None: '''simple docstring''' _validate_not_empty( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) _validate_lists(__snake_case , __snake_case ) _validate_dicts( __snake_case , __snake_case , __snake_case ) def lowercase_ ( __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , ) -> None: '''simple docstring''' if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError("There's an empty parameter" ) def lowercase_ ( __snake_case : Any , __snake_case : Any ) -> None: '''simple docstring''' _validate_list(__snake_case , "observations_space" ) _validate_list(__snake_case , "states_space" ) def lowercase_ ( __snake_case : Any , __snake_case : str ) -> None: '''simple docstring''' if not isinstance(_object , __snake_case ): snake_case__ :Optional[int] = F'{var_name} must be a list' raise ValueError(__snake_case ) else: for x in _object: if not isinstance(__snake_case , __snake_case ): snake_case__ :Any = F'{var_name} must be a list of strings' raise ValueError(__snake_case ) def lowercase_ ( __snake_case : Any , __snake_case : Any , __snake_case : Any , ) -> None: '''simple docstring''' _validate_dict(__snake_case , "initial_probabilities" , __snake_case ) _validate_nested_dict(__snake_case , "transition_probabilities" ) _validate_nested_dict(__snake_case , "emission_probabilities" ) def lowercase_ ( __snake_case : Any , __snake_case : str ) -> None: '''simple docstring''' _validate_dict(_object , __snake_case , __snake_case ) for x in _object.values(): _validate_dict(__snake_case , __snake_case , __snake_case , __snake_case ) def lowercase_ ( __snake_case : Any , __snake_case : str , __snake_case : type , __snake_case : bool = False ) -> None: '''simple docstring''' if not isinstance(_object , __snake_case ): snake_case__ :str = F'{var_name} must be a dict' raise ValueError(__snake_case ) if not all(isinstance(__snake_case , __snake_case ) for x in _object ): snake_case__ :List[Any] = F'{var_name} all keys must be strings' raise ValueError(__snake_case ) if not all(isinstance(__snake_case , __snake_case ) for x in _object.values() ): snake_case__ :Optional[int] = "nested dictionary " if nested else "" snake_case__ :int = F'{var_name} {nested_text}all values must be {value_type.__name__}' raise ValueError(__snake_case ) if __name__ == "__main__": from doctest import testmod testmod()
57
1
from __future__ import annotations __UpperCAmelCase : Any = [] def lowercase_ ( __snake_case : list[list[int]] , __snake_case : int , __snake_case : int ) -> bool: '''simple docstring''' for i in range(len(__snake_case ) ): if board[row][i] == 1: return False for i in range(len(__snake_case ) ): if board[i][column] == 1: return False for i, j in zip(range(__snake_case , -1 , -1 ) , range(__snake_case , -1 , -1 ) ): if board[i][j] == 1: return False for i, j in zip(range(__snake_case , -1 , -1 ) , range(__snake_case , len(__snake_case ) ) ): if board[i][j] == 1: return False return True def lowercase_ ( __snake_case : list[list[int]] , __snake_case : int ) -> bool: '''simple docstring''' if row >= len(__snake_case ): solution.append(__snake_case ) printboard(__snake_case ) print() return True for i in range(len(__snake_case ) ): if is_safe(__snake_case , __snake_case , __snake_case ): snake_case__ :Union[str, Any] = 1 solve(__snake_case , row + 1 ) snake_case__ :int = 0 return False def lowercase_ ( __snake_case : list[list[int]] ) -> None: '''simple docstring''' for i in range(len(__snake_case ) ): for j in range(len(__snake_case ) ): if board[i][j] == 1: print("Q" , end=" " ) else: print("." , end=" " ) print() # n=int(input("The no. of queens")) __UpperCAmelCase : Tuple = 8 __UpperCAmelCase : Optional[Any] = [[0 for i in range(n)] for j in range(n)] solve(board, 0) print("The total no. of solutions are :", len(solution))
57
def lowercase_ ( __snake_case : str ) -> list: '''simple docstring''' return [ txt[:a] + txt[a].upper() + txt[a + 1 :] for a in range(len(__snake_case ) ) if txt[a].isalpha() ] if __name__ == "__main__": __import__("doctest").testmod()
57
1
def lowercase_ ( __snake_case : str , __snake_case : int ) -> str: '''simple docstring''' snake_case__ :list[list[str]] = [[] for _ in range(__snake_case )] snake_case__ :List[str] = key - 1 if key <= 0: raise ValueError("Height of grid can't be 0 or negative" ) if key == 1 or len(__snake_case ) <= key: return input_string for position, character in enumerate(__snake_case ): snake_case__ :str = position % (lowest * 2) # puts it in bounds snake_case__ :str = min(__snake_case , lowest * 2 - num ) # creates zigzag pattern temp_grid[num].append(__snake_case ) snake_case__ :Union[str, Any] = ["".join(__snake_case ) for row in temp_grid] snake_case__ :Optional[int] = "".join(__snake_case ) return output_string def lowercase_ ( __snake_case : str , __snake_case : int ) -> str: '''simple docstring''' snake_case__ :Union[str, Any] = [] snake_case__ :Tuple = key - 1 if key <= 0: raise ValueError("Height of grid can't be 0 or negative" ) if key == 1: return input_string snake_case__ :list[list[str]] = [[] for _ in range(__snake_case )] # generates template for position in range(len(__snake_case ) ): snake_case__ :Tuple = position % (lowest * 2) # puts it in bounds snake_case__ :Union[str, Any] = min(__snake_case , lowest * 2 - num ) # creates zigzag pattern temp_grid[num].append("*" ) snake_case__ :str = 0 for row in temp_grid: # fills in the characters snake_case__ :Dict = input_string[counter : counter + len(__snake_case )] grid.append(list(__snake_case ) ) counter += len(__snake_case ) snake_case__ :Union[str, Any] = "" # reads as zigzag for position in range(len(__snake_case ) ): snake_case__ :Any = position % (lowest * 2) # puts it in bounds snake_case__ :int = min(__snake_case , lowest * 2 - num ) # creates zigzag pattern output_string += grid[num][0] grid[num].pop(0 ) return output_string def lowercase_ ( __snake_case : str ) -> dict[int, str]: '''simple docstring''' snake_case__ :Tuple = {} for key_guess in range(1 , len(__snake_case ) ): # tries every key snake_case__ :Dict = decrypt(__snake_case , __snake_case ) return results if __name__ == "__main__": import doctest doctest.testmod()
57
def lowercase_ ( __snake_case : int = 10_00 ) -> int: '''simple docstring''' snake_case__ :int = 3 snake_case__ :int = 0 while a < n: if a % 3 == 0 or a % 5 == 0: result += a elif a % 15 == 0: result -= a a += 1 return result if __name__ == "__main__": print(F'''{solution() = }''')
57
1
import json import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from transformers import OneFormerImageProcessor from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput if is_vision_available(): from PIL import Image def lowercase_ ( __snake_case : Optional[int] , __snake_case : Optional[Any]="shi-labs/oneformer_demo" ) -> List[str]: '''simple docstring''' with open(hf_hub_download(__snake_case , __snake_case , repo_type="dataset" ) , "r" ) as f: snake_case__ :Any = json.load(__snake_case ) snake_case__ :Dict = {} snake_case__ :int = [] snake_case__ :Optional[int] = [] for key, info in class_info.items(): snake_case__ :Optional[int] = info["name"] class_names.append(info["name"] ) if info["isthing"]: thing_ids.append(int(__snake_case ) ) snake_case__ :Any = thing_ids snake_case__ :Dict = class_names return metadata class _snake_case ( unittest.TestCase ): def __init__( self ,UpperCamelCase ,UpperCamelCase=7 ,UpperCamelCase=3 ,UpperCamelCase=30 ,UpperCamelCase=400 ,UpperCamelCase=None ,UpperCamelCase=True ,UpperCamelCase=True ,UpperCamelCase=[0.5, 0.5, 0.5] ,UpperCamelCase=[0.5, 0.5, 0.5] ,UpperCamelCase=10 ,UpperCamelCase=False ,UpperCamelCase=255 ,UpperCamelCase="shi-labs/oneformer_demo" ,UpperCamelCase="ade20k_panoptic.json" ,UpperCamelCase=10 ,) -> Tuple: snake_case__ :Any = parent snake_case__ :Optional[int] = batch_size snake_case__ :Any = num_channels snake_case__ :Dict = min_resolution snake_case__ :Any = max_resolution snake_case__ :Optional[Any] = do_resize snake_case__ :Dict = {"shortest_edge": 32, "longest_edge": 1_333} if size is None else size snake_case__ :List[str] = do_normalize snake_case__ :Optional[Any] = image_mean snake_case__ :List[Any] = image_std snake_case__ :List[Any] = class_info_file snake_case__ :Optional[Any] = prepare_metadata(UpperCamelCase ,UpperCamelCase ) snake_case__ :str = num_text snake_case__ :List[str] = repo_path # for the post_process_functions snake_case__ :List[str] = 2 snake_case__ :str = 10 snake_case__ :Optional[int] = 10 snake_case__ :Tuple = 3 snake_case__ :List[Any] = 4 snake_case__ :Any = num_labels snake_case__ :List[Any] = do_reduce_labels snake_case__ :Union[str, Any] = ignore_index def lowerCAmelCase_ ( self ) -> List[Any]: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "num_labels": self.num_labels, "do_reduce_labels": self.do_reduce_labels, "ignore_index": self.ignore_index, "class_info_file": self.class_info_file, "metadata": self.metadata, "num_text": self.num_text, } def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase=False ) -> List[str]: if not batched: snake_case__ :int = image_inputs[0] if isinstance(UpperCamelCase ,Image.Image ): snake_case__ , snake_case__ :Any = image.size else: snake_case__ , snake_case__ :List[str] = image.shape[1], image.shape[2] if w < h: snake_case__ :List[str] = int(self.size["shortest_edge"] * h / w ) snake_case__ :Optional[int] = self.size["shortest_edge"] elif w > h: snake_case__ :Dict = self.size["shortest_edge"] snake_case__ :str = int(self.size["shortest_edge"] * w / h ) else: snake_case__ :Optional[Any] = self.size["shortest_edge"] snake_case__ :Dict = self.size["shortest_edge"] else: snake_case__ :Optional[int] = [] for image in image_inputs: snake_case__ , snake_case__ :List[Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) snake_case__ :Tuple = max(UpperCamelCase ,key=lambda UpperCamelCase : item[0] )[0] snake_case__ :Tuple = max(UpperCamelCase ,key=lambda UpperCamelCase : item[1] )[1] return expected_height, expected_width def lowerCAmelCase_ ( self ) -> str: return OneFormerForUniversalSegmentationOutput( # +1 for null class class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) ,masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) ,) @require_torch @require_vision class _snake_case ( _A , unittest.TestCase ): _A = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None # only for test_image_processing_common.test_image_proc_to_json_string _A = image_processing_class def lowerCAmelCase_ ( self ) -> str: snake_case__ :Union[str, Any] = OneFormerImageProcessorTester(self ) @property def lowerCAmelCase_ ( self ) -> Tuple: return self.image_processing_tester.prepare_image_processor_dict() def lowerCAmelCase_ ( self ) -> List[str]: snake_case__ :str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase ,"image_mean" ) ) self.assertTrue(hasattr(UpperCamelCase ,"image_std" ) ) self.assertTrue(hasattr(UpperCamelCase ,"do_normalize" ) ) self.assertTrue(hasattr(UpperCamelCase ,"do_resize" ) ) self.assertTrue(hasattr(UpperCamelCase ,"size" ) ) self.assertTrue(hasattr(UpperCamelCase ,"ignore_index" ) ) self.assertTrue(hasattr(UpperCamelCase ,"class_info_file" ) ) self.assertTrue(hasattr(UpperCamelCase ,"num_text" ) ) self.assertTrue(hasattr(UpperCamelCase ,"repo_path" ) ) self.assertTrue(hasattr(UpperCamelCase ,"metadata" ) ) self.assertTrue(hasattr(UpperCamelCase ,"do_reduce_labels" ) ) def lowerCAmelCase_ ( self ) -> str: pass def lowerCAmelCase_ ( self ) -> Optional[int]: # Initialize image_processor snake_case__ :Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case__ :List[str] = prepare_image_inputs(self.image_processing_tester ,equal_resolution=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase ,Image.Image ) # Test not batched input snake_case__ :Any = image_processor(image_inputs[0] ,["semantic"] ,return_tensors="pt" ).pixel_values snake_case__ , snake_case__ :Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase ) self.assertEqual( encoded_images.shape ,(1, self.image_processing_tester.num_channels, expected_height, expected_width) ,) # Test batched snake_case__ , snake_case__ :Optional[Any] = self.image_processing_tester.get_expected_values(UpperCamelCase ,batched=UpperCamelCase ) snake_case__ :Optional[int] = image_processor( UpperCamelCase ,["semantic"] * len(UpperCamelCase ) ,return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) ,) def lowerCAmelCase_ ( self ) -> List[str]: # Initialize image_processor snake_case__ :Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case__ :Union[str, Any] = prepare_image_inputs(self.image_processing_tester ,equal_resolution=UpperCamelCase ,numpify=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase ,np.ndarray ) # Test not batched input snake_case__ :str = image_processor(image_inputs[0] ,["semantic"] ,return_tensors="pt" ).pixel_values snake_case__ , snake_case__ :str = self.image_processing_tester.get_expected_values(UpperCamelCase ) self.assertEqual( encoded_images.shape ,(1, self.image_processing_tester.num_channels, expected_height, expected_width) ,) # Test batched snake_case__ , snake_case__ :int = self.image_processing_tester.get_expected_values(UpperCamelCase ,batched=UpperCamelCase ) snake_case__ :Tuple = image_processor( UpperCamelCase ,["semantic"] * len(UpperCamelCase ) ,return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) ,) def lowerCAmelCase_ ( self ) -> Tuple: # Initialize image_processor snake_case__ :Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case__ :Tuple = prepare_image_inputs(self.image_processing_tester ,equal_resolution=UpperCamelCase ,torchify=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase ,torch.Tensor ) # Test not batched input snake_case__ :Optional[Any] = image_processor(image_inputs[0] ,["semantic"] ,return_tensors="pt" ).pixel_values snake_case__ , snake_case__ :List[Any] = self.image_processing_tester.get_expected_values(UpperCamelCase ) self.assertEqual( encoded_images.shape ,(1, self.image_processing_tester.num_channels, expected_height, expected_width) ,) # Test batched snake_case__ , snake_case__ :Any = self.image_processing_tester.get_expected_values(UpperCamelCase ,batched=UpperCamelCase ) snake_case__ :List[Any] = image_processor( UpperCamelCase ,["semantic"] * len(UpperCamelCase ) ,return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) ,) def lowerCAmelCase_ ( self ,UpperCamelCase=False ,UpperCamelCase=False ,UpperCamelCase="np" ) -> List[Any]: snake_case__ :Any = self.image_processing_class(**self.image_processor_dict ) # prepare image and target snake_case__ :Dict = self.image_processing_tester.num_labels snake_case__ :Tuple = None snake_case__ :Optional[Any] = None snake_case__ :Dict = prepare_image_inputs(self.image_processing_tester ,equal_resolution=UpperCamelCase ) if with_segmentation_maps: snake_case__ :str = num_labels if is_instance_map: snake_case__ :List[str] = list(range(UpperCamelCase ) ) * 2 snake_case__ :Tuple = dict(enumerate(UpperCamelCase ) ) snake_case__ :int = [ np.random.randint(0 ,high * 2 ,(img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs ] if segmentation_type == "pil": snake_case__ :int = [Image.fromarray(UpperCamelCase ) for annotation in annotations] snake_case__ :List[str] = image_processor( UpperCamelCase ,["semantic"] * len(UpperCamelCase ) ,UpperCamelCase ,return_tensors="pt" ,instance_id_to_semantic_id=UpperCamelCase ,pad_and_return_pixel_mask=UpperCamelCase ,) return inputs def lowerCAmelCase_ ( self ) -> int: pass def lowerCAmelCase_ ( self ) -> int: def common(UpperCamelCase=False ,UpperCamelCase=None ): snake_case__ :Any = self.comm_get_image_processor_inputs( with_segmentation_maps=UpperCamelCase ,is_instance_map=UpperCamelCase ,segmentation_type=UpperCamelCase ) snake_case__ :Optional[Any] = inputs["mask_labels"] snake_case__ :Any = inputs["class_labels"] snake_case__ :List[Any] = inputs["pixel_values"] snake_case__ :List[str] = inputs["text_inputs"] # check the batch_size for mask_label, class_label, text_input in zip(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ): self.assertEqual(mask_label.shape[0] ,class_label.shape[0] ) # this ensure padding has happened self.assertEqual(mask_label.shape[1:] ,pixel_values.shape[2:] ) self.assertEqual(len(UpperCamelCase ) ,self.image_processing_tester.num_text ) common() common(is_instance_map=UpperCamelCase ) common(is_instance_map=UpperCamelCase ,segmentation_type="pil" ) common(is_instance_map=UpperCamelCase ,segmentation_type="pil" ) def lowerCAmelCase_ ( self ) -> Dict: snake_case__ :Optional[Any] = np.zeros((20, 50) ) snake_case__ :Optional[Any] = 1 snake_case__ :Optional[Any] = 1 snake_case__ :Any = 1 snake_case__ :Dict = binary_mask_to_rle(UpperCamelCase ) self.assertEqual(len(UpperCamelCase ) ,4 ) self.assertEqual(rle[0] ,21 ) self.assertEqual(rle[1] ,45 ) def lowerCAmelCase_ ( self ) -> Optional[Any]: snake_case__ :List[str] = self.image_processing_class( num_labels=self.image_processing_tester.num_classes ,max_seq_length=77 ,task_seq_length=77 ,class_info_file="ade20k_panoptic.json" ,num_text=self.image_processing_tester.num_text ,repo_path="shi-labs/oneformer_demo" ,) snake_case__ :Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs() snake_case__ :List[Any] = fature_extractor.post_process_semantic_segmentation(UpperCamelCase ) self.assertEqual(len(UpperCamelCase ) ,self.image_processing_tester.batch_size ) self.assertEqual( segmentation[0].shape ,( self.image_processing_tester.height, self.image_processing_tester.width, ) ,) snake_case__ :List[Any] = [(1, 4) for i in range(self.image_processing_tester.batch_size )] snake_case__ :Optional[Any] = fature_extractor.post_process_semantic_segmentation(UpperCamelCase ,target_sizes=UpperCamelCase ) self.assertEqual(segmentation[0].shape ,target_sizes[0] ) def lowerCAmelCase_ ( self ) -> Tuple: snake_case__ :List[str] = self.image_processing_class( num_labels=self.image_processing_tester.num_classes ,max_seq_length=77 ,task_seq_length=77 ,class_info_file="ade20k_panoptic.json" ,num_text=self.image_processing_tester.num_text ,repo_path="shi-labs/oneformer_demo" ,) snake_case__ :List[str] = self.image_processing_tester.get_fake_oneformer_outputs() snake_case__ :Dict = image_processor.post_process_instance_segmentation(UpperCamelCase ,threshold=0 ) self.assertTrue(len(UpperCamelCase ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue("segmentation" in el ) self.assertTrue("segments_info" in el ) self.assertEqual(type(el["segments_info"] ) ,UpperCamelCase ) self.assertEqual( el["segmentation"].shape ,(self.image_processing_tester.height, self.image_processing_tester.width) ) def lowerCAmelCase_ ( self ) -> Tuple: snake_case__ :str = self.image_processing_class( num_labels=self.image_processing_tester.num_classes ,max_seq_length=77 ,task_seq_length=77 ,class_info_file="ade20k_panoptic.json" ,num_text=self.image_processing_tester.num_text ,repo_path="shi-labs/oneformer_demo" ,) snake_case__ :Tuple = self.image_processing_tester.get_fake_oneformer_outputs() snake_case__ :List[Any] = image_processor.post_process_panoptic_segmentation(UpperCamelCase ,threshold=0 ) self.assertTrue(len(UpperCamelCase ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue("segmentation" in el ) self.assertTrue("segments_info" in el ) self.assertEqual(type(el["segments_info"] ) ,UpperCamelCase ) self.assertEqual( el["segmentation"].shape ,(self.image_processing_tester.height, self.image_processing_tester.width) )
57
import os import sys import unittest __UpperCAmelCase : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __UpperCAmelCase : Tuple = os.path.join(git_repo_path, "src", "diffusers") class _snake_case ( unittest.TestCase ): def lowerCAmelCase_ ( self ) -> Union[str, Any]: snake_case__ :Tuple = find_backend(" if not is_torch_available():" ) self.assertEqual(UpperCamelCase ,"torch" ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") snake_case__ :Tuple = find_backend(" if not (is_torch_available() and is_transformers_available()):" ) self.assertEqual(UpperCamelCase ,"torch_and_transformers" ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") snake_case__ :str = find_backend( " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" ) self.assertEqual(UpperCamelCase ,"torch_and_transformers_and_onnx" ) def lowerCAmelCase_ ( self ) -> str: snake_case__ :int = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch" ,UpperCamelCase ) self.assertIn("torch_and_transformers" ,UpperCamelCase ) self.assertIn("flax_and_transformers" ,UpperCamelCase ) self.assertIn("torch_and_transformers_and_onnx" ,UpperCamelCase ) # Likewise, we can't assert on the exact content of a key self.assertIn("UNet2DModel" ,objects["torch"] ) self.assertIn("FlaxUNet2DConditionModel" ,objects["flax"] ) self.assertIn("StableDiffusionPipeline" ,objects["torch_and_transformers"] ) self.assertIn("FlaxStableDiffusionPipeline" ,objects["flax_and_transformers"] ) self.assertIn("LMSDiscreteScheduler" ,objects["torch_and_scipy"] ) self.assertIn("OnnxStableDiffusionPipeline" ,objects["torch_and_transformers_and_onnx"] ) def lowerCAmelCase_ ( self ) -> Any: snake_case__ :Union[str, Any] = create_dummy_object("CONSTANT" ,"'torch'" ) self.assertEqual(UpperCamelCase ,"\nCONSTANT = None\n" ) snake_case__ :Optional[Any] = create_dummy_object("function" ,"'torch'" ) self.assertEqual( UpperCamelCase ,"\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" ) snake_case__ :str = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n" snake_case__ :List[str] = create_dummy_object("FakeClass" ,"'torch'" ) self.assertEqual(UpperCamelCase ,UpperCamelCase ) def lowerCAmelCase_ ( self ) -> List[Any]: snake_case__ :Tuple = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n" snake_case__ :int = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} ) self.assertEqual(dummy_files["torch"] ,UpperCamelCase )
57
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCAmelCase : Optional[int] = { "configuration_upernet": ["UperNetConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : str = [ "UperNetForSemanticSegmentation", "UperNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_upernet import UperNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel else: import sys __UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
57
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available __UpperCAmelCase : Tuple = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : List[Any] = ["BartphoTokenizer"] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys __UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
57
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __UpperCAmelCase : List[Any] = { "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : Any = [ "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "GraphormerForGraphClassification", "GraphormerModel", "GraphormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_graphormer import ( GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST, GraphormerForGraphClassification, GraphormerModel, GraphormerPreTrainedModel, ) else: import sys __UpperCAmelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
57
import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class _snake_case ( unittest.TestCase ): def lowerCAmelCase_ ( self ) -> List[Any]: # A mock response for an HTTP head request to emulate server down snake_case__ :Tuple = mock.Mock() snake_case__ :List[str] = 500 snake_case__ :Any = {} snake_case__ :Union[str, Any] = HTTPError snake_case__ :Tuple = {} # Download this model to make sure it's in the cache. snake_case__ :Any = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.Session.request" ,return_value=UpperCamelCase ) as mock_head: snake_case__ :Dict = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def lowerCAmelCase_ ( self ) -> Dict: # A mock response for an HTTP head request to emulate server down snake_case__ :Union[str, Any] = mock.Mock() snake_case__ :int = 500 snake_case__ :Any = {} snake_case__ :Dict = HTTPError snake_case__ :List[Any] = {} # Download this model to make sure it's in the cache. snake_case__ :Optional[int] = GPTaTokenizerFast.from_pretrained("gpt2" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.Session.request" ,return_value=UpperCamelCase ) as mock_head: snake_case__ :Any = GPTaTokenizerFast.from_pretrained("gpt2" ) # This check we did call the fake head request mock_head.assert_called() def lowerCAmelCase_ ( self ) -> int: # This test is for deprecated behavior and can be removed in v5 try: snake_case__ :Union[str, Any] = tempfile.mktemp() with open(UpperCamelCase ,"wb" ) as f: http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" ,UpperCamelCase ) snake_case__ :Tuple = AlbertTokenizer.from_pretrained(UpperCamelCase ) finally: os.remove(UpperCamelCase ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile("tokenizer.json" ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open("tokenizer.json" ,"wb" ) as f: http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" ,UpperCamelCase ) snake_case__ :Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size ,1_000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. finally: os.remove("tokenizer.json" ) def lowerCAmelCase_ ( self ) -> Union[str, Any]: # This test is for deprecated behavior and can be removed in v5 snake_case__ :Union[str, Any] = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" ) @is_staging_test class _snake_case ( unittest.TestCase ): _A = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou'] @classmethod def lowerCAmelCase_ ( cls ) -> Optional[int]: snake_case__ :List[str] = TOKEN HfFolder.save_token(UpperCamelCase ) @classmethod def lowerCAmelCase_ ( cls ) -> Union[str, Any]: try: delete_repo(token=cls._token ,repo_id="test-tokenizer" ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id="valid_org/test-tokenizer-org" ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id="test-dynamic-tokenizer" ) except HTTPError: pass def lowerCAmelCase_ ( self ) -> Optional[Any]: with tempfile.TemporaryDirectory() as tmp_dir: snake_case__ :List[str] = os.path.join(UpperCamelCase ,"vocab.txt" ) with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) snake_case__ :str = BertTokenizer(UpperCamelCase ) tokenizer.push_to_hub("test-tokenizer" ,use_auth_token=self._token ) snake_case__ :Dict = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) # Reset repo delete_repo(token=self._token ,repo_id="test-tokenizer" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase ,repo_id="test-tokenizer" ,push_to_hub=UpperCamelCase ,use_auth_token=self._token ) snake_case__ :List[str] = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) def lowerCAmelCase_ ( self ) -> Optional[int]: with tempfile.TemporaryDirectory() as tmp_dir: snake_case__ :List[Any] = os.path.join(UpperCamelCase ,"vocab.txt" ) with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) snake_case__ :Any = BertTokenizer(UpperCamelCase ) tokenizer.push_to_hub("valid_org/test-tokenizer-org" ,use_auth_token=self._token ) snake_case__ :Any = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) # Reset repo delete_repo(token=self._token ,repo_id="valid_org/test-tokenizer-org" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( UpperCamelCase ,repo_id="valid_org/test-tokenizer-org" ,push_to_hub=UpperCamelCase ,use_auth_token=self._token ) snake_case__ :Union[str, Any] = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) @require_tokenizers def lowerCAmelCase_ ( self ) -> Any: CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: snake_case__ :str = os.path.join(UpperCamelCase ,"vocab.txt" ) with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) snake_case__ :Optional[int] = CustomTokenizer(UpperCamelCase ) # No fast custom tokenizer tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token ) snake_case__ :Union[str, Any] = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=UpperCamelCase ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: snake_case__ :int = os.path.join(UpperCamelCase ,"vocab.txt" ) with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) snake_case__ :Tuple = BertTokenizerFast.from_pretrained(UpperCamelCase ) bert_tokenizer.save_pretrained(UpperCamelCase ) snake_case__ :List[Any] = CustomTokenizerFast.from_pretrained(UpperCamelCase ) tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token ) snake_case__ :List[Any] = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=UpperCamelCase ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizerFast" ) snake_case__ :List[str] = AutoTokenizer.from_pretrained( f'{USER}/test-dynamic-tokenizer' ,use_fast=UpperCamelCase ,trust_remote_code=UpperCamelCase ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" ) class _snake_case ( unittest.TestCase ): def lowerCAmelCase_ ( self ) -> List[Any]: snake_case__ :int = Trie() trie.add("Hello 友達" ) self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} ) trie.add("Hello" ) trie.data self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} ) def lowerCAmelCase_ ( self ) -> int: snake_case__ :List[str] = Trie() self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS] This is a extra_id_100"] ) trie.add("[CLS]" ) trie.add("extra_id_1" ) trie.add("extra_id_100" ) self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS]", " This is a ", "extra_id_100"] ) def lowerCAmelCase_ ( self ) -> str: snake_case__ :Optional[Any] = Trie() trie.add("A" ) self.assertEqual(trie.split("ABC" ) ,["A", "BC"] ) self.assertEqual(trie.split("BCA" ) ,["BC", "A"] ) def lowerCAmelCase_ ( self ) -> Dict: snake_case__ :Any = Trie() trie.add("TOKEN]" ) trie.add("[SPECIAL_TOKEN]" ) self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] ) def lowerCAmelCase_ ( self ) -> Tuple: snake_case__ :List[Any] = Trie() trie.add("A" ) trie.add("P" ) trie.add("[SPECIAL_TOKEN]" ) self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] ) def lowerCAmelCase_ ( self ) -> Tuple: snake_case__ :str = Trie() trie.add("AB" ) trie.add("B" ) trie.add("C" ) self.assertEqual(trie.split("ABC" ) ,["AB", "C"] ) def lowerCAmelCase_ ( self ) -> Union[str, Any]: snake_case__ :Dict = Trie() trie.add("ABC" ) trie.add("B" ) trie.add("CD" ) self.assertEqual(trie.split("ABCD" ) ,["ABC", "D"] ) def lowerCAmelCase_ ( self ) -> int: # Even if the offsets are wrong, we necessarily output correct string # parts. snake_case__ :Optional[int] = Trie() snake_case__ :Union[str, Any] = trie.cut_text("ABC" ,[0, 0, 2, 1, 2, 3] ) self.assertEqual(UpperCamelCase ,["AB", "C"] )
57
1
from __future__ import annotations class _snake_case : def __init__( self ,UpperCamelCase ) -> None: snake_case__ :Optional[Any] = order # a_{0} ... a_{k} snake_case__ :Any = [1.0] + [0.0] * order # b_{0} ... b_{k} snake_case__ :List[Any] = [1.0] + [0.0] * order # x[n-1] ... x[n-k] snake_case__ :int = [0.0] * self.order # y[n-1] ... y[n-k] snake_case__ :Any = [0.0] * self.order def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> None: if len(UpperCamelCase ) < self.order: snake_case__ :Tuple = [1.0, *a_coeffs] if len(UpperCamelCase ) != self.order + 1: snake_case__ :Any = ( f'Expected a_coeffs to have {self.order + 1} elements ' f'for {self.order}-order filter, got {len(UpperCamelCase )}' ) raise ValueError(UpperCamelCase ) if len(UpperCamelCase ) != self.order + 1: snake_case__ :Union[str, Any] = ( f'Expected b_coeffs to have {self.order + 1} elements ' f'for {self.order}-order filter, got {len(UpperCamelCase )}' ) raise ValueError(UpperCamelCase ) snake_case__ :List[str] = a_coeffs snake_case__ :List[str] = b_coeffs def lowerCAmelCase_ ( self ,UpperCamelCase ) -> float: snake_case__ :Tuple = 0.0 # Start at index 1 and do index 0 at the end. for i in range(1 ,self.order + 1 ): result += ( self.b_coeffs[i] * self.input_history[i - 1] - self.a_coeffs[i] * self.output_history[i - 1] ) snake_case__ :str = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0] snake_case__ :str = self.input_history[:-1] snake_case__ :List[str] = self.output_history[:-1] snake_case__ :Any = sample snake_case__ :str = result return result
57
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __UpperCAmelCase : Optional[Any] = 1_6 __UpperCAmelCase : Optional[int] = 3_2 def lowercase_ ( __snake_case : Accelerator , __snake_case : int = 16 , __snake_case : str = "bert-base-cased" ) -> Optional[Any]: '''simple docstring''' snake_case__ :int = AutoTokenizer.from_pretrained(__snake_case ) snake_case__ :Optional[int] = load_dataset("glue" , "mrpc" ) def tokenize_function(__snake_case : Tuple ): # max_length=None => use the model max length (it's actually the default) snake_case__ :Any = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__snake_case , max_length=__snake_case ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset snake_case__ :List[Any] = datasets.map( __snake_case , batched=__snake_case , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=__snake_case ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library snake_case__ :Any = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(__snake_case : Dict ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__snake_case , padding="max_length" , max_length=1_28 , return_tensors="pt" ) return tokenizer.pad(__snake_case , padding="longest" , return_tensors="pt" ) # Instantiate dataloaders. snake_case__ :Any = DataLoader( tokenized_datasets["train"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case ) snake_case__ :Tuple = DataLoader( tokenized_datasets["validation"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case ) return train_dataloader, eval_dataloader def lowercase_ ( __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : Optional[int] ) -> Tuple: '''simple docstring''' model.eval() snake_case__ :Union[str, Any] = 0 for step, batch in enumerate(__snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): snake_case__ :List[Any] = model(**__snake_case ) snake_case__ :Any = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times snake_case__ , snake_case__ :Tuple = accelerator.gather( (predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(__snake_case ) - 1: snake_case__ :List[str] = predictions[: len(eval_dataloader.dataset ) - samples_seen] snake_case__ :Optional[int] = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=__snake_case , references=__snake_case , ) snake_case__ :int = metric.compute() return eval_metric["accuracy"] def lowercase_ ( __snake_case : Union[str, Any] , __snake_case : Optional[Any] ) -> Any: '''simple docstring''' snake_case__ :Any = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs snake_case__ :Union[str, Any] = config["lr"] snake_case__ :List[str] = int(config["num_epochs"] ) snake_case__ :Optional[Any] = int(config["seed"] ) snake_case__ :List[Any] = int(config["batch_size"] ) snake_case__ :List[Any] = args.model_name_or_path set_seed(__snake_case ) snake_case__ , snake_case__ :List[Any] = get_dataloaders(__snake_case , __snake_case , __snake_case ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) snake_case__ :List[Any] = AutoModelForSequenceClassification.from_pretrained(__snake_case , return_dict=__snake_case ) # Instantiate optimizer snake_case__ :int = ( AdamW if accelerator.state.deepspeed_plugin is None or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) snake_case__ :Tuple = optimizer_cls(params=model.parameters() , lr=__snake_case ) if accelerator.state.deepspeed_plugin is not None: snake_case__ :List[str] = accelerator.state.deepspeed_plugin.deepspeed_config[ "gradient_accumulation_steps" ] else: snake_case__ :Any = 1 snake_case__ :List[Any] = (len(__snake_case ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): snake_case__ :Optional[Any] = get_linear_schedule_with_warmup( optimizer=__snake_case , num_warmup_steps=0 , num_training_steps=__snake_case , ) else: snake_case__ :Any = DummyScheduler(__snake_case , total_num_steps=__snake_case , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ :int = accelerator.prepare( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) # We need to keep track of how many total steps we have iterated over snake_case__ :Dict = 0 # We also need to keep track of the stating epoch so files are named properly snake_case__ :Union[str, Any] = 0 snake_case__ :List[str] = evaluate.load("glue" , "mrpc" ) snake_case__ :Optional[Any] = num_epochs if args.partial_train_epoch is not None: snake_case__ :List[Any] = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) snake_case__ :Union[str, Any] = args.resume_from_checkpoint.split("epoch_" )[1] snake_case__ :Dict = "" for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break snake_case__ :str = int(__snake_case ) + 1 snake_case__ :List[Any] = evaluation_loop(__snake_case , __snake_case , __snake_case , __snake_case ) accelerator.print("resumed checkpoint performance:" , __snake_case ) accelerator.print("resumed checkpoint's scheduler's lr:" , lr_scheduler.get_lr()[0] ) accelerator.print("resumed optimizers's lr:" , optimizer.param_groups[0]["lr"] ) with open(os.path.join(args.output_dir , F'state_{starting_epoch-1}.json' ) , "r" ) as f: snake_case__ :Tuple = json.load(__snake_case ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model snake_case__ :Optional[int] = {} for epoch in range(__snake_case , __snake_case ): model.train() for step, batch in enumerate(__snake_case ): snake_case__ :str = model(**__snake_case ) snake_case__ :List[str] = outputs.loss snake_case__ :List[Any] = loss / gradient_accumulation_steps accelerator.backward(__snake_case ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 snake_case__ :int = F'epoch_{epoch}' snake_case__ :str = os.path.join(args.output_dir , __snake_case ) accelerator.save_state(__snake_case ) snake_case__ :Union[str, Any] = evaluation_loop(__snake_case , __snake_case , __snake_case , __snake_case ) snake_case__ :List[str] = accuracy snake_case__ :List[str] = lr_scheduler.get_lr()[0] snake_case__ :List[Any] = optimizer.param_groups[0]["lr"] snake_case__ :Dict = epoch snake_case__ :List[Any] = overall_step accelerator.print(F'epoch {epoch}:' , __snake_case ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , F'state_{epoch}.json' ) , "w" ) as f: json.dump(__snake_case , __snake_case ) def lowercase_ ( ) -> Any: '''simple docstring''' snake_case__ :List[Any] = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." ) parser.add_argument( "--model_name_or_path" , type=__snake_case , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__snake_case , ) parser.add_argument( "--output_dir" , type=__snake_case , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , ) parser.add_argument( "--resume_from_checkpoint" , type=__snake_case , default=__snake_case , help="If the training should continue from a checkpoint folder." , ) parser.add_argument( "--partial_train_epoch" , type=__snake_case , default=__snake_case , help="If passed, the training will stop after this number of epochs." , ) parser.add_argument( "--num_epochs" , type=__snake_case , default=2 , help="Number of train epochs." , ) snake_case__ :Any = parser.parse_args() snake_case__ :int = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} training_function(__snake_case , __snake_case ) if __name__ == "__main__": main()
57
1
import unittest import numpy as np def lowercase_ ( __snake_case : np.ndarray , __snake_case : np.ndarray , __snake_case : np.ndarray , __snake_case : np.ndarray | None = None , ) -> np.ndarray: '''simple docstring''' snake_case__ :Any = np.shape(__snake_case ) snake_case__ :Dict = np.shape(__snake_case ) snake_case__ :Tuple = np.shape(__snake_case ) if shape_a[0] != shape_b[0]: snake_case__ :int = ( "Expected the same number of rows for A and B. " F'Instead found A of size {shape_a} and B of size {shape_b}' ) raise ValueError(__snake_case ) if shape_b[1] != shape_c[1]: snake_case__ :Dict = ( "Expected the same number of columns for B and C. " F'Instead found B of size {shape_b} and C of size {shape_c}' ) raise ValueError(__snake_case ) snake_case__ :List[Any] = pseudo_inv if a_inv is None: try: snake_case__ :Dict = np.linalg.inv(__snake_case ) except np.linalg.LinAlgError: raise ValueError( "Input matrix A is not invertible. Cannot compute Schur complement." ) return mat_c - mat_b.T @ a_inv @ mat_b class _snake_case ( unittest.TestCase ): def lowerCAmelCase_ ( self ) -> None: snake_case__ :Any = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) snake_case__ :Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] ) snake_case__ :List[str] = np.array([[2, 1], [6, 3]] ) snake_case__ :List[str] = schur_complement(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) snake_case__ :Any = np.block([[a, b], [b.T, c]] ) snake_case__ :List[str] = np.linalg.det(UpperCamelCase ) snake_case__ :Dict = np.linalg.det(UpperCamelCase ) snake_case__ :Any = np.linalg.det(UpperCamelCase ) self.assertAlmostEqual(UpperCamelCase ,det_a * det_s ) def lowerCAmelCase_ ( self ) -> None: snake_case__ :Tuple = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) snake_case__ :List[str] = np.array([[0, 3], [3, 0], [2, 3]] ) snake_case__ :Union[str, Any] = np.array([[2, 1], [6, 3]] ) with self.assertRaises(UpperCamelCase ): schur_complement(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) def lowerCAmelCase_ ( self ) -> None: snake_case__ :Any = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) snake_case__ :str = np.array([[0, 3], [3, 0], [2, 3]] ) snake_case__ :Dict = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(UpperCamelCase ): schur_complement(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
57
from __future__ import annotations class _snake_case : def __init__( self ,UpperCamelCase ) -> None: snake_case__ :Union[str, Any] = data snake_case__ :Node | None = None snake_case__ :Node | None = None def lowercase_ ( __snake_case : Node | None ) -> None: # In Order traversal of the tree '''simple docstring''' if tree: display(tree.left ) print(tree.data ) display(tree.right ) def lowercase_ ( __snake_case : Node | None ) -> int: '''simple docstring''' return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0 def lowercase_ ( __snake_case : Node ) -> bool: '''simple docstring''' if not tree: return True if tree.left and tree.right: return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right ) else: return not tree.left and not tree.right def lowercase_ ( ) -> None: # Main function for testing. '''simple docstring''' snake_case__ :Dict = Node(1 ) snake_case__ :int = Node(2 ) snake_case__ :Optional[Any] = Node(3 ) snake_case__ :Tuple = Node(4 ) snake_case__ :str = Node(5 ) snake_case__ :Optional[Any] = Node(6 ) snake_case__ :List[Any] = Node(7 ) snake_case__ :List[str] = Node(8 ) snake_case__ :Tuple = Node(9 ) print(is_full_binary_tree(__snake_case ) ) print(depth_of_tree(__snake_case ) ) print("Tree is: " ) display(__snake_case ) if __name__ == "__main__": main()
57
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCAmelCase : int = { "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : Tuple = [ "LILT_PRETRAINED_MODEL_ARCHIVE_LIST", "LiltForQuestionAnswering", "LiltForSequenceClassification", "LiltForTokenClassification", "LiltModel", "LiltPreTrainedModel", ] if TYPE_CHECKING: from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lilt import ( LILT_PRETRAINED_MODEL_ARCHIVE_LIST, LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, LiltPreTrainedModel, ) else: import sys __UpperCAmelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
57
import os try: from .build_directory_md import good_file_paths except ImportError: from build_directory_md import good_file_paths # type: ignore __UpperCAmelCase : List[Any] = list(good_file_paths()) assert filepaths, "good_file_paths() failed!" __UpperCAmelCase : int = [file for file in filepaths if file != file.lower()] if upper_files: print(F'''{len(upper_files)} files contain uppercase characters:''') print("\n".join(upper_files) + "\n") __UpperCAmelCase : Any = [file for file in filepaths if " " in file] if space_files: print(F'''{len(space_files)} files contain space characters:''') print("\n".join(space_files) + "\n") __UpperCAmelCase : str = [file for file in filepaths if "-" in file] if hyphen_files: print(F'''{len(hyphen_files)} files contain hyphen characters:''') print("\n".join(hyphen_files) + "\n") __UpperCAmelCase : Dict = [file for file in filepaths if os.sep not in file] if nodir_files: print(F'''{len(nodir_files)} files are not in a directory:''') print("\n".join(nodir_files) + "\n") __UpperCAmelCase : int = len(upper_files + space_files + hyphen_files + nodir_files) if bad_files: import sys sys.exit(bad_files)
57
1
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: __UpperCAmelCase : Any = None __UpperCAmelCase : int = logging.get_logger(__name__) __UpperCAmelCase : List[str] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} __UpperCAmelCase : str = { "vocab_file": { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model", }, "tokenizer_file": { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json", }, } __UpperCAmelCase : Optional[int] = { "camembert-base": 5_1_2, } __UpperCAmelCase : int = "▁" class _snake_case ( _A ): _A = VOCAB_FILES_NAMES _A = PRETRAINED_VOCAB_FILES_MAP _A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A = ['input_ids', 'attention_mask'] _A = CamembertTokenizer def __init__( self ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase="<s>" ,UpperCamelCase="</s>" ,UpperCamelCase="</s>" ,UpperCamelCase="<s>" ,UpperCamelCase="<unk>" ,UpperCamelCase="<pad>" ,UpperCamelCase="<mask>" ,UpperCamelCase=["<s>NOTUSED", "</s>NOTUSED"] ,**UpperCamelCase ,) -> int: # Mask token behave like a normal word, i.e. include the space before it snake_case__ :List[str] = AddedToken(UpperCamelCase ,lstrip=UpperCamelCase ,rstrip=UpperCamelCase ) if isinstance(UpperCamelCase ,UpperCamelCase ) else mask_token super().__init__( UpperCamelCase ,tokenizer_file=UpperCamelCase ,bos_token=UpperCamelCase ,eos_token=UpperCamelCase ,sep_token=UpperCamelCase ,cls_token=UpperCamelCase ,unk_token=UpperCamelCase ,pad_token=UpperCamelCase ,mask_token=UpperCamelCase ,additional_special_tokens=UpperCamelCase ,**UpperCamelCase ,) snake_case__ :int = vocab_file snake_case__ :Dict = False if not self.vocab_file else True def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case__ :Optional[int] = [self.cls_token_id] snake_case__ :Tuple = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> List[int]: snake_case__ :str = [self.sep_token_id] snake_case__ :List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(UpperCamelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return snake_case__ :Dict = os.path.join( UpperCamelCase ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ): copyfile(self.vocab_file ,UpperCamelCase ) return (out_vocab_file,)
57
def lowercase_ ( __snake_case : Tuple , __snake_case : Optional[int] ) -> List[Any]: '''simple docstring''' snake_case__ :Dict = "" for i in table: res += inp[i - 1] return res def lowercase_ ( __snake_case : List[str] ) -> int: '''simple docstring''' return data[1:] + data[0] def lowercase_ ( __snake_case : int , __snake_case : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' snake_case__ :Union[str, Any] = "" for i in range(len(__snake_case ) ): if a[i] == b[i]: res += "0" else: res += "1" return res def lowercase_ ( __snake_case : Optional[int] , __snake_case : Dict ) -> Union[str, Any]: '''simple docstring''' snake_case__ :int = int("0b" + data[0] + data[-1] , 2 ) snake_case__ :Union[str, Any] = int("0b" + data[1:3] , 2 ) return bin(s[row][col] )[2:] def lowercase_ ( __snake_case : Dict , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : List[Any] , __snake_case : Optional[int] ) -> List[str]: '''simple docstring''' snake_case__ :Tuple = message[:4] snake_case__ :int = message[4:] snake_case__ :int = apply_table(__snake_case , __snake_case ) snake_case__ :Union[str, Any] = xor(__snake_case , __snake_case ) snake_case__ :Tuple = apply_sbox(__snake_case , temp[:4] ) # noqa: E741 snake_case__ :List[str] = apply_sbox(__snake_case , temp[4:] ) snake_case__ :int = "0" * (2 - len(__snake_case )) + l # noqa: E741 snake_case__ :int = "0" * (2 - len(__snake_case )) + r snake_case__ :Optional[Any] = apply_table(l + r , __snake_case ) snake_case__ :Tuple = xor(__snake_case , __snake_case ) return temp + right if __name__ == "__main__": __UpperCAmelCase : Dict = input("Enter 10 bit key: ") __UpperCAmelCase : Tuple = input("Enter 8 bit message: ") __UpperCAmelCase : Any = [6, 3, 7, 4, 8, 5, 1_0, 9] __UpperCAmelCase : List[str] = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6] __UpperCAmelCase : Tuple = [2, 4, 3, 1] __UpperCAmelCase : List[Any] = [2, 6, 3, 1, 4, 8, 5, 7] __UpperCAmelCase : Optional[Any] = [4, 1, 3, 5, 7, 2, 8, 6] __UpperCAmelCase : Optional[int] = [4, 1, 2, 3, 2, 3, 4, 1] __UpperCAmelCase : List[Any] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]] __UpperCAmelCase : Union[str, Any] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]] # key generation __UpperCAmelCase : int = apply_table(key, paa_table) __UpperCAmelCase : Dict = temp[:5] __UpperCAmelCase : Optional[int] = temp[5:] __UpperCAmelCase : Optional[int] = left_shift(left) __UpperCAmelCase : Union[str, Any] = left_shift(right) __UpperCAmelCase : int = apply_table(left + right, pa_table) __UpperCAmelCase : Tuple = left_shift(left) __UpperCAmelCase : Union[str, Any] = left_shift(right) __UpperCAmelCase : Dict = left_shift(left) __UpperCAmelCase : Optional[Any] = left_shift(right) __UpperCAmelCase : Optional[int] = apply_table(left + right, pa_table) # encryption __UpperCAmelCase : Tuple = apply_table(message, IP) __UpperCAmelCase : Tuple = function(expansion, sa, sa, keya, temp) __UpperCAmelCase : List[Any] = temp[4:] + temp[:4] __UpperCAmelCase : int = function(expansion, sa, sa, keya, temp) __UpperCAmelCase : Union[str, Any] = apply_table(temp, IP_inv) print("Cipher text is:", CT) # decryption __UpperCAmelCase : List[Any] = apply_table(CT, IP) __UpperCAmelCase : List[Any] = function(expansion, sa, sa, keya, temp) __UpperCAmelCase : int = temp[4:] + temp[:4] __UpperCAmelCase : Union[str, Any] = function(expansion, sa, sa, keya, temp) __UpperCAmelCase : Union[str, Any] = apply_table(temp, IP_inv) print("Plain text after decypting is:", PT)
57
1
import inspect import os import unittest from pathlib import Path import torch import accelerate from accelerate.test_utils import execute_subprocess_async from accelerate.test_utils.testing import run_command class _snake_case ( unittest.TestCase ): _A = inspect.getfile(accelerate.test_utils ) _A = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_cli.py'] ) _A = ['accelerate', 'launch'] _A = Path.home() / '.cache/huggingface/accelerate' _A = 'default_config.yaml' _A = config_folder / config_file _A = config_folder / '_default_config.yaml' _A = Path('tests/test_configs' ) @classmethod def lowerCAmelCase_ ( cls ) -> List[str]: if cls.config_path.is_file(): cls.config_path.rename(cls.changed_path ) @classmethod def lowerCAmelCase_ ( cls ) -> List[str]: if cls.changed_path.is_file(): cls.changed_path.rename(cls.config_path ) def lowerCAmelCase_ ( self ) -> Any: snake_case__ :Tuple = self.base_cmd if torch.cuda.is_available() and (torch.cuda.device_count() > 1): cmd += ["--multi_gpu"] execute_subprocess_async(cmd + [self.test_file_path] ,env=os.environ.copy() ) def lowerCAmelCase_ ( self ) -> int: for config in sorted(self.test_config_path.glob("**/*.yaml" ) ): with self.subTest(config_file=UpperCamelCase ): execute_subprocess_async( self.base_cmd + ["--config_file", str(UpperCamelCase ), self.test_file_path] ,env=os.environ.copy() ) def lowerCAmelCase_ ( self ) -> Union[str, Any]: execute_subprocess_async(["accelerate", "test"] ,env=os.environ.copy() ) class _snake_case ( unittest.TestCase ): _A = 'test-tpu' _A = 'us-central1-a' _A = 'ls' _A = ['accelerate', 'tpu-config'] _A = 'cd /usr/share' _A = 'tests/test_samples/test_command_file.sh' _A = 'Running gcloud compute tpus tpu-vm ssh' def lowerCAmelCase_ ( self ) -> Tuple: snake_case__ :Dict = run_command( self.cmd + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"] ,return_stdout=UpperCamelCase ,) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' ,UpperCamelCase ,) def lowerCAmelCase_ ( self ) -> Optional[Any]: snake_case__ :Tuple = run_command( self.cmd + [ "--config_file", "tests/test_configs/0_12_0.yaml", "--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug", ] ,return_stdout=UpperCamelCase ,) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' ,UpperCamelCase ,) def lowerCAmelCase_ ( self ) -> Optional[Any]: snake_case__ :Dict = run_command( self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"] ,return_stdout=UpperCamelCase ) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' ,UpperCamelCase ,) def lowerCAmelCase_ ( self ) -> List[Any]: snake_case__ :Dict = run_command( self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"] ,return_stdout=UpperCamelCase ,) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' ,UpperCamelCase ,) def lowerCAmelCase_ ( self ) -> List[str]: snake_case__ :Tuple = run_command( self.cmd + [ "--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--command", "echo \"Hello World\"", "--debug", ] ,return_stdout=UpperCamelCase ,) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all' ,UpperCamelCase ,) def lowerCAmelCase_ ( self ) -> Dict: snake_case__ :int = run_command( self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"] ,return_stdout=UpperCamelCase ,) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' ,UpperCamelCase ,) def lowerCAmelCase_ ( self ) -> int: snake_case__ :Optional[Any] = run_command( self.cmd + [ "--config_file", "tests/test_configs/0_12_0.yaml", "--command_file", self.command_file, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug", ] ,return_stdout=UpperCamelCase ,) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' ,UpperCamelCase ,) def lowerCAmelCase_ ( self ) -> int: snake_case__ :Optional[Any] = run_command( self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"] ,return_stdout=UpperCamelCase ,) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all' ,UpperCamelCase ,) def lowerCAmelCase_ ( self ) -> List[str]: snake_case__ :List[Any] = run_command( self.cmd + [ "--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--accelerate_version", "12.0.0", "--debug", ] ,return_stdout=UpperCamelCase ,) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all' ,UpperCamelCase ,)
57
import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class _snake_case ( _A , _A , _A ): @register_to_config def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = False ,) -> int: super().__init__() snake_case__ :Union[str, Any] = nn.Embedding(UpperCamelCase ,UpperCamelCase ) snake_case__ :int = nn.Embedding(UpperCamelCase ,UpperCamelCase ) snake_case__ :Any = False snake_case__ :List[Any] = nn.Dropout(p=UpperCamelCase ) snake_case__ :Tuple = TaConfig( vocab_size=UpperCamelCase ,d_model=UpperCamelCase ,num_heads=UpperCamelCase ,d_kv=UpperCamelCase ,d_ff=UpperCamelCase ,dropout_rate=UpperCamelCase ,feed_forward_proj=UpperCamelCase ,is_decoder=UpperCamelCase ,is_encoder_decoder=UpperCamelCase ,) snake_case__ :List[str] = nn.ModuleList() for lyr_num in range(UpperCamelCase ): snake_case__ :List[Any] = TaBlock(UpperCamelCase ) self.encoders.append(UpperCamelCase ) snake_case__ :Optional[Any] = TaLayerNorm(UpperCamelCase ) snake_case__ :Any = nn.Dropout(p=UpperCamelCase ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> int: snake_case__ :str = self.token_embedder(UpperCamelCase ) snake_case__ :int = encoder_input_tokens.shape[1] snake_case__ :List[Any] = torch.arange(UpperCamelCase ,device=encoder_input_tokens.device ) x += self.position_encoding(UpperCamelCase ) snake_case__ :Optional[int] = self.dropout_pre(UpperCamelCase ) # inverted the attention mask snake_case__ :Optional[Any] = encoder_input_tokens.size() snake_case__ :Dict = self.get_extended_attention_mask(UpperCamelCase ,UpperCamelCase ) for lyr in self.encoders: snake_case__ :str = lyr(UpperCamelCase ,UpperCamelCase )[0] snake_case__ :List[Any] = self.layer_norm(UpperCamelCase ) return self.dropout_post(UpperCamelCase ), encoder_inputs_mask
57
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __UpperCAmelCase : List[Any] = { "configuration_distilbert": [ "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DistilBertConfig", "DistilBertOnnxConfig", ], "tokenization_distilbert": ["DistilBertTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : Dict = ["DistilBertTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : str = [ "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "DistilBertForMaskedLM", "DistilBertForMultipleChoice", "DistilBertForQuestionAnswering", "DistilBertForSequenceClassification", "DistilBertForTokenClassification", "DistilBertModel", "DistilBertPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : Dict = [ "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDistilBertForMaskedLM", "TFDistilBertForMultipleChoice", "TFDistilBertForQuestionAnswering", "TFDistilBertForSequenceClassification", "TFDistilBertForTokenClassification", "TFDistilBertMainLayer", "TFDistilBertModel", "TFDistilBertPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : Union[str, Any] = [ "FlaxDistilBertForMaskedLM", "FlaxDistilBertForMultipleChoice", "FlaxDistilBertForQuestionAnswering", "FlaxDistilBertForSequenceClassification", "FlaxDistilBertForTokenClassification", "FlaxDistilBertModel", "FlaxDistilBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys __UpperCAmelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
57
__UpperCAmelCase : int = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []} __UpperCAmelCase : List[str] = ["a", "b", "c", "d", "e"] def lowercase_ ( __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : Tuple ) -> Optional[int]: '''simple docstring''' snake_case__ :List[Any] = start # add current to visited visited.append(__snake_case ) snake_case__ :List[str] = edges[current] for neighbor in neighbors: # if neighbor not in visited, visit if neighbor not in visited: snake_case__ :Any = topological_sort(__snake_case , __snake_case , __snake_case ) # if all neighbors visited add current to sort sort.append(__snake_case ) # if all vertices haven't been visited select a new one to visit if len(__snake_case ) != len(__snake_case ): for vertice in vertices: if vertice not in visited: snake_case__ :Any = topological_sort(__snake_case , __snake_case , __snake_case ) # return sort return sort if __name__ == "__main__": __UpperCAmelCase : Tuple = topological_sort("a", [], []) print(sort)
57
1
def lowercase_ ( __snake_case : int ) -> list: '''simple docstring''' snake_case__ :Optional[Any] = int(__snake_case ) if n_element < 1: snake_case__ :str = ValueError("a should be a positive number" ) raise my_error snake_case__ :List[str] = [1] snake_case__ , snake_case__ , snake_case__ :List[str] = (0, 0, 0) snake_case__ :Tuple = 1 while index < n_element: while hamming_list[i] * 2 <= hamming_list[-1]: i += 1 while hamming_list[j] * 3 <= hamming_list[-1]: j += 1 while hamming_list[k] * 5 <= hamming_list[-1]: k += 1 hamming_list.append( min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) ) index += 1 return hamming_list if __name__ == "__main__": __UpperCAmelCase : List[Any] = input("Enter the last number (nth term) of the Hamming Number Series: ") print("Formula of Hamming Number Series => 2^i * 3^j * 5^k") __UpperCAmelCase : Optional[Any] = hamming(int(n)) print("-----------------------------------------------------") print(F'''The list with nth numbers is: {hamming_numbers}''') print("-----------------------------------------------------")
57
import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class _snake_case ( unittest.TestCase ): def lowerCAmelCase_ ( self ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() def lowerCAmelCase_ ( self ) -> str: snake_case__ , snake_case__ :Tuple = FlaxControlNetModel.from_pretrained( "lllyasviel/sd-controlnet-canny" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa ) snake_case__ , snake_case__ :Any = FlaxStableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa ) snake_case__ :List[str] = controlnet_params snake_case__ :Union[str, Any] = "bird" snake_case__ :Optional[int] = jax.device_count() snake_case__ :Tuple = pipe.prepare_text_inputs([prompts] * num_samples ) snake_case__ :Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) snake_case__ :str = pipe.prepare_image_inputs([canny_image] * num_samples ) snake_case__ :List[str] = jax.random.PRNGKey(0 ) snake_case__ :str = jax.random.split(UpperCamelCase ,jax.device_count() ) snake_case__ :int = replicate(UpperCamelCase ) snake_case__ :Any = shard(UpperCamelCase ) snake_case__ :Any = shard(UpperCamelCase ) snake_case__ :str = pipe( prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) snake_case__ :List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) snake_case__ :Any = images[0, 253:256, 253:256, -1] snake_case__ :Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case__ :List[Any] = jnp.array( [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] ) print(f'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def lowerCAmelCase_ ( self ) -> Optional[int]: snake_case__ , snake_case__ :List[str] = FlaxControlNetModel.from_pretrained( "lllyasviel/sd-controlnet-openpose" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa ) snake_case__ , snake_case__ :Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa ) snake_case__ :str = controlnet_params snake_case__ :int = "Chef in the kitchen" snake_case__ :List[Any] = jax.device_count() snake_case__ :Dict = pipe.prepare_text_inputs([prompts] * num_samples ) snake_case__ :Any = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" ) snake_case__ :Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples ) snake_case__ :List[str] = jax.random.PRNGKey(0 ) snake_case__ :Any = jax.random.split(UpperCamelCase ,jax.device_count() ) snake_case__ :Dict = replicate(UpperCamelCase ) snake_case__ :Tuple = shard(UpperCamelCase ) snake_case__ :Optional[int] = shard(UpperCamelCase ) snake_case__ :Optional[Any] = pipe( prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) snake_case__ :int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) snake_case__ :List[str] = images[0, 253:256, 253:256, -1] snake_case__ :Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case__ :List[str] = jnp.array( [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] ) print(f'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
57
1
import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionTextToImagePipeline from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device __UpperCAmelCase : Tuple = False class _snake_case ( unittest.TestCase ): pass @nightly @require_torch_gpu class _snake_case ( unittest.TestCase ): def lowerCAmelCase_ ( self ) -> Optional[int]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self ) -> Tuple: snake_case__ :Optional[Any] = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" ) # remove text_unet pipe.remove_unused_weights() pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) snake_case__ :int = "A painting of a squirrel eating a burger " snake_case__ :int = torch.manual_seed(0 ) snake_case__ :List[str] = pipe( prompt=UpperCamelCase ,generator=UpperCamelCase ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type="numpy" ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(UpperCamelCase ) snake_case__ :Dict = VersatileDiffusionTextToImagePipeline.from_pretrained(UpperCamelCase ) pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) snake_case__ :List[str] = generator.manual_seed(0 ) snake_case__ :int = pipe( prompt=UpperCamelCase ,generator=UpperCamelCase ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type="numpy" ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def lowerCAmelCase_ ( self ) -> List[Any]: snake_case__ :Dict = VersatileDiffusionTextToImagePipeline.from_pretrained( "shi-labs/versatile-diffusion" ,torch_dtype=torch.floataa ) pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) snake_case__ :Optional[Any] = "A painting of a squirrel eating a burger " snake_case__ :Optional[Any] = torch.manual_seed(0 ) snake_case__ :Any = pipe( prompt=UpperCamelCase ,generator=UpperCamelCase ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type="numpy" ).images snake_case__ :Any = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) snake_case__ :int = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
57
def lowercase_ ( __snake_case : list ) -> list: '''simple docstring''' if any(not isinstance(__snake_case , __snake_case ) or x < 0 for x in sequence ): raise TypeError("Sequence must be list of non-negative integers" ) for _ in range(len(__snake_case ) ): for i, (rod_upper, rod_lower) in enumerate(zip(__snake_case , sequence[1:] ) ): if rod_upper > rod_lower: sequence[i] -= rod_upper - rod_lower sequence[i + 1] += rod_upper - rod_lower return sequence if __name__ == "__main__": assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
57
1
import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def lowercase_ ( __snake_case : Tuple , __snake_case : str , __snake_case : List[Any] ) -> Optional[Any]: '''simple docstring''' return params[F'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :] def lowercase_ ( __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : List[str]="attention" ) -> Tuple: '''simple docstring''' snake_case__ :List[str] = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] ) snake_case__ :int = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] ) snake_case__ :Dict = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] ) snake_case__ :Tuple = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] ) snake_case__ :Optional[Any] = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] ) snake_case__ :Union[str, Any] = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] ) snake_case__ :List[str] = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] ) snake_case__ :str = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] ) return k, o, q, v def lowercase_ ( __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : str , __snake_case : int=False ) -> List[Any]: '''simple docstring''' if split_mlp_wi: snake_case__ :str = params[F'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :] snake_case__ :str = params[F'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :] snake_case__ :int = (wi_a, wi_a) else: snake_case__ :Optional[int] = params[F'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :] snake_case__ :Optional[int] = params[F'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :] return wi, wo def lowercase_ ( __snake_case : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : Tuple , __snake_case : Any ) -> Optional[int]: '''simple docstring''' return params[F'{prefix}/{prefix}/{layer_name}/scale'][:, i] def lowercase_ ( __snake_case : dict , *, __snake_case : int , __snake_case : bool , __snake_case : bool = False ) -> Dict: '''simple docstring''' snake_case__ :Tuple = traverse_util.flatten_dict(variables["target"] ) snake_case__ :Dict = {"/".join(__snake_case ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi snake_case__ :Optional[Any] = "encoder/encoder/mlp/wi_0/kernel" in old print("Split MLP:" , __snake_case ) snake_case__ :Union[str, Any] = collections.OrderedDict() # Shared embeddings. snake_case__ :List[str] = old["token_embedder/embedding"] # Encoder. for i in range(__snake_case ): # Block i, layer 0 (Self Attention). snake_case__ :Tuple = tax_layer_norm_lookup(__snake_case , __snake_case , "encoder" , "pre_attention_layer_norm" ) snake_case__ , snake_case__ , snake_case__ , snake_case__ :List[str] = tax_attention_lookup(__snake_case , __snake_case , "encoder" , "attention" ) snake_case__ :str = layer_norm snake_case__ :Optional[int] = k.T snake_case__ :Tuple = o.T snake_case__ :Optional[int] = q.T snake_case__ :List[str] = v.T # Block i, layer 1 (MLP). snake_case__ :Any = tax_layer_norm_lookup(__snake_case , __snake_case , "encoder" , "pre_mlp_layer_norm" ) snake_case__ , snake_case__ :Union[str, Any] = tax_mlp_lookup(__snake_case , __snake_case , "encoder" , __snake_case ) snake_case__ :List[Any] = layer_norm if split_mlp_wi: snake_case__ :int = wi[0].T snake_case__ :Optional[Any] = wi[1].T else: snake_case__ :List[str] = wi.T snake_case__ :Dict = wo.T if scalable_attention: # convert the rel_embedding of each layer snake_case__ :str = tax_relpos_bias_lookup( __snake_case , __snake_case , "encoder" ).T snake_case__ :int = old["encoder/encoder_norm/scale"] if not scalable_attention: snake_case__ :List[Any] = tax_relpos_bias_lookup( __snake_case , 0 , "encoder" ).T snake_case__ :Union[str, Any] = tax_relpos_bias_lookup( __snake_case , 0 , "decoder" ).T if not is_encoder_only: # Decoder. for i in range(__snake_case ): # Block i, layer 0 (Self Attention). snake_case__ :Dict = tax_layer_norm_lookup(__snake_case , __snake_case , "decoder" , "pre_self_attention_layer_norm" ) snake_case__ , snake_case__ , snake_case__ , snake_case__ :List[Any] = tax_attention_lookup(__snake_case , __snake_case , "decoder" , "self_attention" ) snake_case__ :List[Any] = layer_norm snake_case__ :Tuple = k.T snake_case__ :Optional[int] = o.T snake_case__ :Tuple = q.T snake_case__ :List[str] = v.T # Block i, layer 1 (Cross Attention). snake_case__ :List[Any] = tax_layer_norm_lookup(__snake_case , __snake_case , "decoder" , "pre_cross_attention_layer_norm" ) snake_case__ , snake_case__ , snake_case__ , snake_case__ :int = tax_attention_lookup(__snake_case , __snake_case , "decoder" , "encoder_decoder_attention" ) snake_case__ :List[Any] = layer_norm snake_case__ :Optional[Any] = k.T snake_case__ :List[Any] = o.T snake_case__ :str = q.T snake_case__ :Optional[Any] = v.T # Block i, layer 2 (MLP). snake_case__ :Optional[int] = tax_layer_norm_lookup(__snake_case , __snake_case , "decoder" , "pre_mlp_layer_norm" ) snake_case__ , snake_case__ :Dict = tax_mlp_lookup(__snake_case , __snake_case , "decoder" , __snake_case ) snake_case__ :int = layer_norm if split_mlp_wi: snake_case__ :Optional[Any] = wi[0].T snake_case__ :List[str] = wi[1].T else: snake_case__ :Optional[int] = wi.T snake_case__ :Optional[int] = wo.T if scalable_attention: # convert the rel_embedding of each layer snake_case__ :Optional[Any] = tax_relpos_bias_lookup(__snake_case , __snake_case , "decoder" ).T snake_case__ :Dict = old["decoder/decoder_norm/scale"] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: snake_case__ :Optional[Any] = old["decoder/logits_dense/kernel"].T return new def lowercase_ ( __snake_case : str , __snake_case : bool ) -> Any: '''simple docstring''' snake_case__ :Optional[Any] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: snake_case__ :Tuple = state_dict["shared.weight"] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: snake_case__ :Optional[Any] = state_dict["shared.weight"] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("Using shared word embeddings as lm_head." ) snake_case__ :Optional[int] = state_dict["shared.weight"] return state_dict def lowercase_ ( __snake_case : str , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Any ) -> Optional[int]: '''simple docstring''' snake_case__ :List[str] = checkpoints.load_tax_checkpoint(__snake_case ) snake_case__ :List[Any] = convert_tax_to_pytorch( __snake_case , num_layers=config.num_layers , is_encoder_only=__snake_case , scalable_attention=__snake_case ) snake_case__ :Optional[Any] = make_state_dict(__snake_case , __snake_case ) model.load_state_dict(__snake_case , strict=__snake_case ) def lowercase_ ( __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : bool = False , __snake_case : bool = False , ) -> List[Any]: '''simple docstring''' snake_case__ :Dict = MTaConfig.from_json_file(__snake_case ) print(F'Building PyTorch model from configuration: {config}' ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: snake_case__ :Any = UMTaEncoderModel(__snake_case ) else: snake_case__ :List[Any] = UMTaForConditionalGeneration(__snake_case ) # Load weights from tf checkpoint load_tax_weights_in_ta(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) # Save pytorch-model print(F'Save PyTorch model to {pytorch_dump_path}' ) model.save_pretrained(__snake_case ) # Verify that we can load the checkpoint. model.from_pretrained(__snake_case ) print("Done" ) if __name__ == "__main__": __UpperCAmelCase : Dict = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.") # Required parameters parser.add_argument( "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False ) parser.add_argument( "--scalable_attention", action="store_true", help="Whether the model uses scaled attention (umt5 model)", default=False, ) __UpperCAmelCase : str = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
57
from __future__ import annotations def lowercase_ ( __snake_case : list ) -> float: '''simple docstring''' if not nums: raise ValueError("List is empty" ) return sum(__snake_case ) / len(__snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
57
1
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __UpperCAmelCase : Tuple = logging.get_logger(__name__) def lowercase_ ( __snake_case : Dict ) -> List[List[ImageInput]]: '''simple docstring''' if isinstance(__snake_case , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(__snake_case , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(__snake_case ): return [[videos]] raise ValueError(F'Could not make batched video from {videos}' ) class _snake_case ( _A ): _A = ['pixel_values'] def __init__( self ,UpperCamelCase = True ,UpperCamelCase = None ,UpperCamelCase = PILImageResampling.BILINEAR ,UpperCamelCase = True ,UpperCamelCase = None ,UpperCamelCase = True ,UpperCamelCase = 1 / 255 ,UpperCamelCase = True ,UpperCamelCase = None ,UpperCamelCase = None ,**UpperCamelCase ,) -> None: super().__init__(**UpperCamelCase ) snake_case__ :Union[str, Any] = size if size is not None else {"shortest_edge": 224} snake_case__ :Any = get_size_dict(UpperCamelCase ,default_to_square=UpperCamelCase ) snake_case__ :Tuple = crop_size if crop_size is not None else {"height": 224, "width": 224} snake_case__ :Optional[Any] = get_size_dict(UpperCamelCase ,param_name="crop_size" ) snake_case__ :Any = do_resize snake_case__ :str = size snake_case__ :Optional[Any] = do_center_crop snake_case__ :Union[str, Any] = crop_size snake_case__ :int = resample snake_case__ :List[str] = do_rescale snake_case__ :Optional[Any] = rescale_factor snake_case__ :str = do_normalize snake_case__ :str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN snake_case__ :Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = PILImageResampling.BILINEAR ,UpperCamelCase = None ,**UpperCamelCase ,) -> np.ndarray: snake_case__ :Any = get_size_dict(UpperCamelCase ,default_to_square=UpperCamelCase ) if "shortest_edge" in size: snake_case__ :Any = get_resize_output_image_size(UpperCamelCase ,size["shortest_edge"] ,default_to_square=UpperCamelCase ) elif "height" in size and "width" in size: snake_case__ :int = (size["height"], size["width"]) else: raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' ) return resize(UpperCamelCase ,size=UpperCamelCase ,resample=UpperCamelCase ,data_format=UpperCamelCase ,**UpperCamelCase ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = None ,**UpperCamelCase ,) -> np.ndarray: snake_case__ :Optional[Any] = get_size_dict(UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' ) return center_crop(UpperCamelCase ,size=(size["height"], size["width"]) ,data_format=UpperCamelCase ,**UpperCamelCase ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = None ,**UpperCamelCase ,) -> Tuple: return rescale(UpperCamelCase ,scale=UpperCamelCase ,data_format=UpperCamelCase ,**UpperCamelCase ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = None ,**UpperCamelCase ,) -> np.ndarray: return normalize(UpperCamelCase ,mean=UpperCamelCase ,std=UpperCamelCase ,data_format=UpperCamelCase ,**UpperCamelCase ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = ChannelDimension.FIRST ,) -> np.ndarray: if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. snake_case__ :Any = to_numpy_array(UpperCamelCase ) if do_resize: snake_case__ :str = self.resize(image=UpperCamelCase ,size=UpperCamelCase ,resample=UpperCamelCase ) if do_center_crop: snake_case__ :Optional[int] = self.center_crop(UpperCamelCase ,size=UpperCamelCase ) if do_rescale: snake_case__ :int = self.rescale(image=UpperCamelCase ,scale=UpperCamelCase ) if do_normalize: snake_case__ :Any = self.normalize(image=UpperCamelCase ,mean=UpperCamelCase ,std=UpperCamelCase ) snake_case__ :Optional[Any] = to_channel_dimension_format(UpperCamelCase ,UpperCamelCase ) return image def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = ChannelDimension.FIRST ,**UpperCamelCase ,) -> PIL.Image.Image: snake_case__ :Any = do_resize if do_resize is not None else self.do_resize snake_case__ :Dict = resample if resample is not None else self.resample snake_case__ :Dict = do_center_crop if do_center_crop is not None else self.do_center_crop snake_case__ :List[Any] = do_rescale if do_rescale is not None else self.do_rescale snake_case__ :Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case__ :Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize snake_case__ :Dict = image_mean if image_mean is not None else self.image_mean snake_case__ :Optional[int] = image_std if image_std is not None else self.image_std snake_case__ :str = size if size is not None else self.size snake_case__ :Union[str, Any] = get_size_dict(UpperCamelCase ,default_to_square=UpperCamelCase ) snake_case__ :Dict = crop_size if crop_size is not None else self.crop_size snake_case__ :Tuple = get_size_dict(UpperCamelCase ,param_name="crop_size" ) if not valid_images(UpperCamelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) snake_case__ :int = make_batched(UpperCamelCase ) snake_case__ :int = [ [ self._preprocess_image( image=UpperCamelCase ,do_resize=UpperCamelCase ,size=UpperCamelCase ,resample=UpperCamelCase ,do_center_crop=UpperCamelCase ,crop_size=UpperCamelCase ,do_rescale=UpperCamelCase ,rescale_factor=UpperCamelCase ,do_normalize=UpperCamelCase ,image_mean=UpperCamelCase ,image_std=UpperCamelCase ,data_format=UpperCamelCase ,) for img in video ] for video in videos ] snake_case__ :List[str] = {"pixel_values": videos} return BatchFeature(data=UpperCamelCase ,tensor_type=UpperCamelCase )
57
from __future__ import annotations import math def lowercase_ ( __snake_case : int , __snake_case : int , __snake_case : bool , __snake_case : list[int] , __snake_case : float ) -> int: '''simple docstring''' if depth < 0: raise ValueError("Depth cannot be less than 0" ) if len(__snake_case ) == 0: raise ValueError("Scores cannot be empty" ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) , ) return min( minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) , ) def lowercase_ ( ) -> None: '''simple docstring''' snake_case__ :List[Any] = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23] snake_case__ :int = math.log(len(__snake_case ) , 2 ) print("Optimal value : " , end="" ) print(minimax(0 , 0 , __snake_case , __snake_case , __snake_case ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
57
1
import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _snake_case : def __init__( self ,UpperCamelCase ,UpperCamelCase=13 ,UpperCamelCase=32 ,UpperCamelCase=2 ,UpperCamelCase=3 ,UpperCamelCase=16 ,UpperCamelCase=[32, 64, 128] ,UpperCamelCase=[1, 2, 1] ,UpperCamelCase=[2, 2, 4] ,UpperCamelCase=2 ,UpperCamelCase=2.0 ,UpperCamelCase=True ,UpperCamelCase=0.0 ,UpperCamelCase=0.0 ,UpperCamelCase=0.1 ,UpperCamelCase="gelu" ,UpperCamelCase=False ,UpperCamelCase=True ,UpperCamelCase=0.02 ,UpperCamelCase=1E-5 ,UpperCamelCase=True ,UpperCamelCase=None ,UpperCamelCase=True ,UpperCamelCase=10 ,UpperCamelCase=8 ,UpperCamelCase=["stage1", "stage2"] ,UpperCamelCase=[1, 2] ,) -> Optional[Any]: snake_case__ :int = parent snake_case__ :Tuple = batch_size snake_case__ :int = image_size snake_case__ :Any = patch_size snake_case__ :Any = num_channels snake_case__ :Union[str, Any] = embed_dim snake_case__ :Any = hidden_sizes snake_case__ :Dict = depths snake_case__ :int = num_heads snake_case__ :int = window_size snake_case__ :Optional[int] = mlp_ratio snake_case__ :List[str] = qkv_bias snake_case__ :Optional[Any] = hidden_dropout_prob snake_case__ :Optional[int] = attention_probs_dropout_prob snake_case__ :Optional[Any] = drop_path_rate snake_case__ :Optional[int] = hidden_act snake_case__ :str = use_absolute_embeddings snake_case__ :Any = patch_norm snake_case__ :int = layer_norm_eps snake_case__ :str = initializer_range snake_case__ :Tuple = is_training snake_case__ :Any = scope snake_case__ :Any = use_labels snake_case__ :List[Any] = type_sequence_label_size snake_case__ :List[str] = encoder_stride snake_case__ :str = out_features snake_case__ :List[str] = out_indices def lowerCAmelCase_ ( self ) -> int: snake_case__ :Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case__ :Dict = None if self.use_labels: snake_case__ :Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) snake_case__ :Any = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ ( self ) -> Union[str, Any]: return FocalNetConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> List[Any]: snake_case__ :List[Any] = FocalNetModel(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ :Any = model(UpperCamelCase ) snake_case__ :Optional[int] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) snake_case__ :Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> List[str]: snake_case__ :List[Any] = FocalNetBackbone(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ :List[str] = model(UpperCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) ) self.parent.assertListEqual(model.channels ,config.hidden_sizes[:-1] ) # verify backbone works with out_features=None snake_case__ :Optional[int] = None snake_case__ :Union[str, Any] = FocalNetBackbone(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ :Tuple = model(UpperCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) ,1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size * 2, 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) ,1 ) self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> int: snake_case__ :Any = FocalNetForMaskedImageModeling(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ :str = model(UpperCamelCase ) self.parent.assertEqual( result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images snake_case__ :Union[str, Any] = 1 snake_case__ :str = FocalNetForMaskedImageModeling(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ :str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case__ :Optional[int] = model(UpperCamelCase ) self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]: snake_case__ :List[Any] = self.type_sequence_label_size snake_case__ :int = FocalNetForImageClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ :str = model(UpperCamelCase ,labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) # test greyscale images snake_case__ :List[str] = 1 snake_case__ :Union[str, Any] = FocalNetForImageClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ :str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case__ :Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase_ ( self ) -> Any: snake_case__ :List[str] = self.prepare_config_and_inputs() snake_case__ , snake_case__ , snake_case__ :str = config_and_inputs snake_case__ :Any = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _snake_case ( _A , _A , unittest.TestCase ): _A = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) _A = ( {'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification} if is_torch_available() else {} ) _A = False _A = False _A = False _A = False _A = False def lowerCAmelCase_ ( self ) -> Union[str, Any]: snake_case__ :Any = FocalNetModelTester(self ) snake_case__ :Any = ConfigTester(self ,config_class=UpperCamelCase ,embed_dim=37 ,has_text_modality=UpperCamelCase ) def lowerCAmelCase_ ( self ) -> Tuple: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCAmelCase_ ( self ) -> str: return def lowerCAmelCase_ ( self ) -> List[Any]: snake_case__ :str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCAmelCase_ ( self ) -> str: snake_case__ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*UpperCamelCase ) def lowerCAmelCase_ ( self ) -> Optional[Any]: snake_case__ :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase ) def lowerCAmelCase_ ( self ) -> List[str]: snake_case__ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase ) @unittest.skip(reason="FocalNet does not use inputs_embeds" ) def lowerCAmelCase_ ( self ) -> Dict: pass @unittest.skip(reason="FocalNet does not use feedforward chunking" ) def lowerCAmelCase_ ( self ) -> List[Any]: pass def lowerCAmelCase_ ( self ) -> Dict: snake_case__ , snake_case__ :int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: snake_case__ :List[Any] = model_class(UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) snake_case__ :Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase ,nn.Linear ) ) def lowerCAmelCase_ ( self ) -> str: snake_case__ , snake_case__ :List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: snake_case__ :Union[str, Any] = model_class(UpperCamelCase ) snake_case__ :int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case__ :Tuple = [*signature.parameters.keys()] snake_case__ :List[str] = ["pixel_values"] self.assertListEqual(arg_names[:1] ,UpperCamelCase ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Optional[int]: snake_case__ :List[str] = model_class(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() with torch.no_grad(): snake_case__ :Any = model(**self._prepare_for_class(UpperCamelCase ,UpperCamelCase ) ) snake_case__ :Optional[int] = outputs.hidden_states snake_case__ :Union[str, Any] = getattr( self.model_tester ,"expected_num_hidden_layers" ,len(self.model_tester.depths ) + 1 ) self.assertEqual(len(UpperCamelCase ) ,UpperCamelCase ) # FocalNet has a different seq_length snake_case__ :List[str] = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case__ :Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) snake_case__ :List[str] = outputs.reshaped_hidden_states self.assertEqual(len(UpperCamelCase ) ,UpperCamelCase ) snake_case__ , snake_case__ , snake_case__ , snake_case__ :Dict = reshaped_hidden_states[0].shape snake_case__ :Any = ( reshaped_hidden_states[0].view(UpperCamelCase ,UpperCamelCase ,height * width ).permute(0 ,2 ,1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) def lowerCAmelCase_ ( self ) -> str: snake_case__ , snake_case__ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ :Optional[Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: snake_case__ :List[Any] = True self.check_hidden_states_output(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case__ :Any = True self.check_hidden_states_output(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) def lowerCAmelCase_ ( self ) -> List[Any]: snake_case__ , snake_case__ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ :List[Any] = 3 snake_case__ :List[Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) snake_case__ :Optional[int] = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case__ :Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) snake_case__ :Optional[int] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: snake_case__ :Tuple = True self.check_hidden_states_output(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,(padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case__ :int = True self.check_hidden_states_output(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,(padded_height, padded_width) ) @slow def lowerCAmelCase_ ( self ) -> str: for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ :Tuple = FocalNetModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) def lowerCAmelCase_ ( self ) -> str: snake_case__ , snake_case__ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ :int = _config_zero_init(UpperCamelCase ) for model_class in self.all_model_classes: snake_case__ :Dict = model_class(config=UpperCamelCase ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() ,[0.0, 1.0] ,msg=f'Parameter {name} of model {model_class} seems not properly initialized' ,) @require_vision @require_torch class _snake_case ( unittest.TestCase ): @cached_property def lowerCAmelCase_ ( self ) -> Union[str, Any]: # TODO update organization return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None @slow def lowerCAmelCase_ ( self ) -> Optional[int]: snake_case__ :str = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(UpperCamelCase ) snake_case__ :Optional[int] = self.default_image_processor snake_case__ :Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) snake_case__ :int = image_processor(images=UpperCamelCase ,return_tensors="pt" ).to(UpperCamelCase ) # forward pass with torch.no_grad(): snake_case__ :Union[str, Any] = model(**UpperCamelCase ) # verify the logits snake_case__ :Tuple = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape ,UpperCamelCase ) snake_case__ :Union[str, Any] = torch.tensor([0.2166, -0.4368, 0.2191] ).to(UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,UpperCamelCase ,atol=1E-4 ) ) self.assertTrue(outputs.logits.argmax(dim=-1 ).item() ,281 ) @require_torch class _snake_case ( _A , unittest.TestCase ): _A = (FocalNetBackbone,) if is_torch_available() else () _A = FocalNetConfig _A = False def lowerCAmelCase_ ( self ) -> Optional[int]: snake_case__ :Any = FocalNetModelTester(self )
57
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __UpperCAmelCase : Optional[Any] = logging.get_logger(__name__) def lowercase_ ( __snake_case : Any , __snake_case : Any ) -> Any: '''simple docstring''' snake_case__ :Optional[Any] = b.T snake_case__ :Optional[Any] = np.sum(np.square(__snake_case ) , axis=1 ) snake_case__ :Tuple = np.sum(np.square(__snake_case ) , axis=0 ) snake_case__ :Union[str, Any] = np.matmul(__snake_case , __snake_case ) snake_case__ :Union[str, Any] = aa[:, None] - 2 * ab + ba[None, :] return d def lowercase_ ( __snake_case : Optional[Any] , __snake_case : int ) -> Any: '''simple docstring''' snake_case__ :Optional[Any] = x.reshape(-1 , 3 ) snake_case__ :List[str] = squared_euclidean_distance(__snake_case , __snake_case ) return np.argmin(__snake_case , axis=1 ) class _snake_case ( _A ): _A = ['pixel_values'] def __init__( self ,UpperCamelCase = None ,UpperCamelCase = True ,UpperCamelCase = None ,UpperCamelCase = PILImageResampling.BILINEAR ,UpperCamelCase = True ,UpperCamelCase = True ,**UpperCamelCase ,) -> None: super().__init__(**UpperCamelCase ) snake_case__ :List[Any] = size if size is not None else {"height": 256, "width": 256} snake_case__ :str = get_size_dict(UpperCamelCase ) snake_case__ :Dict = np.array(UpperCamelCase ) if clusters is not None else None snake_case__ :str = do_resize snake_case__ :List[str] = size snake_case__ :List[Any] = resample snake_case__ :Union[str, Any] = do_normalize snake_case__ :int = do_color_quantize def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = PILImageResampling.BILINEAR ,UpperCamelCase = None ,**UpperCamelCase ,) -> np.ndarray: snake_case__ :List[str] = get_size_dict(UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}' ) return resize( UpperCamelCase ,size=(size["height"], size["width"]) ,resample=UpperCamelCase ,data_format=UpperCamelCase ,**UpperCamelCase ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,) -> np.ndarray: snake_case__ :Tuple = rescale(image=UpperCamelCase ,scale=1 / 127.5 ,data_format=UpperCamelCase ) snake_case__ :List[Any] = image - 1 return image def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = ChannelDimension.FIRST ,**UpperCamelCase ,) -> PIL.Image.Image: snake_case__ :Optional[int] = do_resize if do_resize is not None else self.do_resize snake_case__ :int = size if size is not None else self.size snake_case__ :Tuple = get_size_dict(UpperCamelCase ) snake_case__ :str = resample if resample is not None else self.resample snake_case__ :Dict = do_normalize if do_normalize is not None else self.do_normalize snake_case__ :Tuple = do_color_quantize if do_color_quantize is not None else self.do_color_quantize snake_case__ :List[Any] = clusters if clusters is not None else self.clusters snake_case__ :str = np.array(UpperCamelCase ) snake_case__ :int = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_color_quantize and clusters is None: raise ValueError("Clusters must be specified if do_color_quantize is True." ) # All transformations expect numpy arrays. snake_case__ :Union[str, Any] = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: snake_case__ :int = [self.resize(image=UpperCamelCase ,size=UpperCamelCase ,resample=UpperCamelCase ) for image in images] if do_normalize: snake_case__ :Any = [self.normalize(image=UpperCamelCase ) for image in images] if do_color_quantize: snake_case__ :Optional[Any] = [to_channel_dimension_format(UpperCamelCase ,ChannelDimension.LAST ) for image in images] # color quantize from (batch_size, height, width, 3) to (batch_size, height, width) snake_case__ :Union[str, Any] = np.array(UpperCamelCase ) snake_case__ :Optional[int] = color_quantize(UpperCamelCase ,UpperCamelCase ).reshape(images.shape[:-1] ) # flatten to (batch_size, height*width) snake_case__ :List[Any] = images.shape[0] snake_case__ :str = images.reshape(UpperCamelCase ,-1 ) # We need to convert back to a list of images to keep consistent behaviour across processors. snake_case__ :Any = list(UpperCamelCase ) else: snake_case__ :List[str] = [to_channel_dimension_format(UpperCamelCase ,UpperCamelCase ) for image in images] snake_case__ :List[str] = {"input_ids": images} return BatchFeature(data=UpperCamelCase ,tensor_type=UpperCamelCase )
57
1
import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) __UpperCAmelCase : Dict = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation="relu") ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(3_2, (3, 3), activation="relu")) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=1_2_8, activation="relu")) classifier.add(layers.Dense(units=1, activation="sigmoid")) # Compiling the CNN classifier.compile( optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') __UpperCAmelCase : str = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) __UpperCAmelCase : Tuple = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5) __UpperCAmelCase : List[Any] = train_datagen.flow_from_directory( "dataset/training_set", target_size=(6_4, 6_4), batch_size=3_2, class_mode="binary" ) __UpperCAmelCase : Tuple = test_datagen.flow_from_directory( "dataset/test_set", target_size=(6_4, 6_4), batch_size=3_2, class_mode="binary" ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set ) classifier.save("cnn.h5") # Part 3 - Making new predictions __UpperCAmelCase : Tuple = tf.keras.preprocessing.image.load_img( "dataset/single_prediction/image.png", target_size=(6_4, 6_4) ) __UpperCAmelCase : Optional[int] = tf.keras.preprocessing.image.img_to_array(test_image) __UpperCAmelCase : Tuple = np.expand_dims(test_image, axis=0) __UpperCAmelCase : Optional[Any] = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: __UpperCAmelCase : Dict = "Normal" if result[0][0] == 1: __UpperCAmelCase : Tuple = "Abnormality detected"
57
import pytest __UpperCAmelCase : int = "__dummy_dataset1__" __UpperCAmelCase : int = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n" @pytest.fixture def lowercase_ ( ) -> Optional[Any]: '''simple docstring''' return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def lowercase_ ( ) -> Optional[int]: '''simple docstring''' return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def lowercase_ ( __snake_case : Optional[int] , __snake_case : List[Any] , __snake_case : Any ) -> Dict: '''simple docstring''' snake_case__ :Optional[Any] = dataset_loading_script_name snake_case__ :Optional[Any] = tmp_path / "datasets" / script_name script_dir.mkdir(parents=__snake_case ) snake_case__ :List[Any] = script_dir / F'{script_name}.py' with open(__snake_case , "w" ) as f: f.write(__snake_case ) return str(__snake_case )
57
1
import random import sys import numpy as np from matplotlib import pyplot as plt from matplotlib.colors import ListedColormap __UpperCAmelCase : Optional[Any] = "Usage of script: script_name <size_of_canvas:int>" __UpperCAmelCase : List[Any] = [0] * 1_0_0 + [1] * 1_0 random.shuffle(choice) def lowercase_ ( __snake_case : int ) -> list[list[bool]]: '''simple docstring''' snake_case__ :Union[str, Any] = [[False for i in range(__snake_case )] for j in range(__snake_case )] return canvas def lowercase_ ( __snake_case : list[list[bool]] ) -> None: '''simple docstring''' for i, row in enumerate(__snake_case ): for j, _ in enumerate(__snake_case ): snake_case__ :List[Any] = bool(random.getrandbits(1 ) ) def lowercase_ ( __snake_case : list[list[bool]] ) -> list[list[bool]]: '''simple docstring''' snake_case__ :Optional[int] = np.array(__snake_case ) snake_case__ :Optional[int] = np.array(create_canvas(current_canvas.shape[0] ) ) for r, row in enumerate(__snake_case ): for c, pt in enumerate(__snake_case ): snake_case__ :List[Any] = __judge_point( __snake_case , current_canvas[r - 1 : r + 2, c - 1 : c + 2] ) snake_case__ :Optional[int] = next_gen_canvas del next_gen_canvas # cleaning memory as we move on. snake_case__ :list[list[bool]] = current_canvas.tolist() return return_canvas def lowercase_ ( __snake_case : bool , __snake_case : list[list[bool]] ) -> bool: '''simple docstring''' snake_case__ :str = 0 snake_case__ :int = 0 # finding dead or alive neighbours count. for i in neighbours: for status in i: if status: alive += 1 else: dead += 1 # handling duplicate entry for focus pt. if pt: alive -= 1 else: dead -= 1 # running the rules of game here. snake_case__ :List[str] = pt if pt: if alive < 2: snake_case__ :str = False elif alive == 2 or alive == 3: snake_case__ :Optional[Any] = True elif alive > 3: snake_case__ :int = False else: if alive == 3: snake_case__ :List[Any] = True return state if __name__ == "__main__": if len(sys.argv) != 2: raise Exception(usage_doc) __UpperCAmelCase : Any = int(sys.argv[1]) # main working structure of this module. __UpperCAmelCase : Union[str, Any] = create_canvas(canvas_size) seed(c) __UpperCAmelCase , __UpperCAmelCase : int = plt.subplots() fig.show() __UpperCAmelCase : str = ListedColormap(["w", "k"]) try: while True: __UpperCAmelCase : Dict = run(c) ax.matshow(c, cmap=cmap) fig.canvas.draw() ax.cla() except KeyboardInterrupt: # do nothing. pass
57
from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
57
1
import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def lowercase_ ( __snake_case : List[Any] ) -> List[str]: '''simple docstring''' snake_case__ :Optional[Any] = args.pruning_method snake_case__ :str = args.threshold snake_case__ :List[str] = args.model_name_or_path.rstrip("/" ) snake_case__ :List[Any] = args.target_model_path print(F'Load fine-pruned model from {model_name_or_path}' ) snake_case__ :str = torch.load(os.path.join(__snake_case , "pytorch_model.bin" ) ) snake_case__ :List[Any] = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: snake_case__ :Dict = tensor print(F'Copied layer {name}' ) elif "classifier" in name or "qa_output" in name: snake_case__ :List[Any] = tensor print(F'Copied layer {name}' ) elif "bias" in name: snake_case__ :Union[str, Any] = tensor print(F'Copied layer {name}' ) else: if pruning_method == "magnitude": snake_case__ :Dict = MagnitudeBinarizer.apply(inputs=__snake_case , threshold=__snake_case ) snake_case__ :Union[str, Any] = tensor * mask print(F'Pruned layer {name}' ) elif pruning_method == "topK": if "mask_scores" in name: continue snake_case__ :Dict = name[:-6] snake_case__ :Optional[int] = model[F'{prefix_}mask_scores'] snake_case__ :int = TopKBinarizer.apply(__snake_case , __snake_case ) snake_case__ :Tuple = tensor * mask print(F'Pruned layer {name}' ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue snake_case__ :int = name[:-6] snake_case__ :Optional[Any] = model[F'{prefix_}mask_scores'] snake_case__ :int = ThresholdBinarizer.apply(__snake_case , __snake_case , __snake_case ) snake_case__ :Tuple = tensor * mask print(F'Pruned layer {name}' ) elif pruning_method == "l0": if "mask_scores" in name: continue snake_case__ :Dict = name[:-6] snake_case__ :int = model[F'{prefix_}mask_scores'] snake_case__ , snake_case__ :Any = -0.1, 1.1 snake_case__ :List[Any] = torch.sigmoid(__snake_case ) snake_case__ :Optional[Any] = s * (r - l) + l snake_case__ :List[Any] = s_bar.clamp(min=0.0 , max=1.0 ) snake_case__ :int = tensor * mask print(F'Pruned layer {name}' ) else: raise ValueError("Unknown pruning method" ) if target_model_path is None: snake_case__ :Optional[int] = os.path.join( os.path.dirname(__snake_case ) , F'bertarized_{os.path.basename(__snake_case )}' ) if not os.path.isdir(__snake_case ): shutil.copytree(__snake_case , __snake_case ) print(F'\nCreated folder {target_model_path}' ) torch.save(__snake_case , os.path.join(__snake_case , "pytorch_model.bin" ) ) print("\nPruned model saved! See you later!" ) if __name__ == "__main__": __UpperCAmelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument( "--pruning_method", choices=["l0", "magnitude", "topK", "sigmoied_threshold"], type=str, required=True, help=( "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning," " sigmoied_threshold = Soft movement pruning)" ), ) parser.add_argument( "--threshold", type=float, required=False, help=( "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model." "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared." "Not needed for `l0`" ), ) parser.add_argument( "--model_name_or_path", type=str, required=True, help="Folder containing the model that was previously fine-pruned", ) parser.add_argument( "--target_model_path", default=None, type=str, required=False, help="Folder containing the model that was previously fine-pruned", ) __UpperCAmelCase : Dict = parser.parse_args() main(args)
57
import json import os import shutil import warnings from argparse import ArgumentParser, Namespace from pathlib import Path from typing import List from ..utils import logging from . import BaseTransformersCLICommand try: from cookiecutter.main import cookiecutter __UpperCAmelCase : Dict = True except ImportError: __UpperCAmelCase : List[Any] = False __UpperCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name def lowercase_ ( __snake_case : Namespace ) -> Dict: '''simple docstring''' return AddNewModelCommand(args.testing , args.testing_file , path=args.path ) class _snake_case ( _A ): @staticmethod def lowerCAmelCase_ ( UpperCamelCase ) -> Any: snake_case__ :Dict = parser.add_parser("add-new-model" ) add_new_model_parser.add_argument("--testing" ,action="store_true" ,help="If in testing mode." ) add_new_model_parser.add_argument("--testing_file" ,type=UpperCamelCase ,help="Configuration file on which to run." ) add_new_model_parser.add_argument( "--path" ,type=UpperCamelCase ,help="Path to cookiecutter. Should only be used for testing purposes." ) add_new_model_parser.set_defaults(func=UpperCamelCase ) def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ,*UpperCamelCase ) -> Any: snake_case__ :Union[str, Any] = testing snake_case__ :Union[str, Any] = testing_file snake_case__ :List[str] = path def lowerCAmelCase_ ( self ) -> List[Any]: warnings.warn( "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. " "It is not actively maintained anymore, so might give a result that won't pass all tests and quality " "checks, you should use `transformers-cli add-new-model-like` instead." ) if not _has_cookiecutter: raise ImportError( "Model creation dependencies are required to use the `add_new_model` command. Install them by running " "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" ) # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory snake_case__ :Tuple = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]] if len(UpperCamelCase ) > 0: raise ValueError( "Several directories starting with `cookiecutter-template-` in current working directory. " "Please clean your directory by removing all folders starting with `cookiecutter-template-` or " "change your working directory." ) snake_case__ :str = ( Path(UpperCamelCase ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent ) snake_case__ :Tuple = path_to_transformer_root / "templates" / "adding_a_new_model" # Execute cookiecutter if not self._testing: cookiecutter(str(UpperCamelCase ) ) else: with open(self._testing_file ,"r" ) as configuration_file: snake_case__ :str = json.load(UpperCamelCase ) cookiecutter( str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=UpperCamelCase ,extra_context=UpperCamelCase ,) snake_case__ :List[Any] = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0] # Retrieve configuration with open(directory + "/configuration.json" ,"r" ) as configuration_file: snake_case__ :Dict = json.load(UpperCamelCase ) snake_case__ :Optional[Any] = configuration["lowercase_modelname"] snake_case__ :List[Any] = configuration["generate_tensorflow_pytorch_and_flax"] os.remove(f'{directory}/configuration.json' ) snake_case__ :Any = "PyTorch" in generate_tensorflow_pytorch_and_flax snake_case__ :Any = "TensorFlow" in generate_tensorflow_pytorch_and_flax snake_case__ :Any = "Flax" in generate_tensorflow_pytorch_and_flax snake_case__ :Dict = f'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}' os.makedirs(UpperCamelCase ,exist_ok=UpperCamelCase ) os.makedirs(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=UpperCamelCase ) # Tests require submodules as they have parent imports with open(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,"w" ): pass shutil.move( f'{directory}/__init__.py' ,f'{model_dir}/__init__.py' ,) shutil.move( f'{directory}/configuration_{lowercase_model_name}.py' ,f'{model_dir}/configuration_{lowercase_model_name}.py' ,) def remove_copy_lines(UpperCamelCase ): with open(UpperCamelCase ,"r" ) as f: snake_case__ :List[str] = f.readlines() with open(UpperCamelCase ,"w" ) as f: for line in lines: if "# Copied from transformers." not in line: f.write(UpperCamelCase ) if output_pytorch: if not self._testing: remove_copy_lines(f'{directory}/modeling_{lowercase_model_name}.py' ) shutil.move( f'{directory}/modeling_{lowercase_model_name}.py' ,f'{model_dir}/modeling_{lowercase_model_name}.py' ,) shutil.move( f'{directory}/test_modeling_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,) else: os.remove(f'{directory}/modeling_{lowercase_model_name}.py' ) os.remove(f'{directory}/test_modeling_{lowercase_model_name}.py' ) if output_tensorflow: if not self._testing: remove_copy_lines(f'{directory}/modeling_tf_{lowercase_model_name}.py' ) shutil.move( f'{directory}/modeling_tf_{lowercase_model_name}.py' ,f'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,) shutil.move( f'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,) else: os.remove(f'{directory}/modeling_tf_{lowercase_model_name}.py' ) os.remove(f'{directory}/test_modeling_tf_{lowercase_model_name}.py' ) if output_flax: if not self._testing: remove_copy_lines(f'{directory}/modeling_flax_{lowercase_model_name}.py' ) shutil.move( f'{directory}/modeling_flax_{lowercase_model_name}.py' ,f'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,) shutil.move( f'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,) else: os.remove(f'{directory}/modeling_flax_{lowercase_model_name}.py' ) os.remove(f'{directory}/test_modeling_flax_{lowercase_model_name}.py' ) shutil.move( f'{directory}/{lowercase_model_name}.md' ,f'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,) shutil.move( f'{directory}/tokenization_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}.py' ,) shutil.move( f'{directory}/tokenization_fast_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,) from os import fdopen, remove from shutil import copymode, move from tempfile import mkstemp def replace(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ): # Create temp file snake_case__ , snake_case__ :Optional[Any] = mkstemp() snake_case__ :Optional[Any] = False with fdopen(UpperCamelCase ,"w" ) as new_file: with open(UpperCamelCase ) as old_file: for line in old_file: new_file.write(UpperCamelCase ) if line_to_copy_below in line: snake_case__ :Optional[Any] = True for line_to_copy in lines_to_copy: new_file.write(UpperCamelCase ) if not line_found: raise ValueError(f'Line {line_to_copy_below} was not found in file.' ) # Copy the file permissions from the old file to the new file copymode(UpperCamelCase ,UpperCamelCase ) # Remove original file remove(UpperCamelCase ) # Move new file move(UpperCamelCase ,UpperCamelCase ) def skip_units(UpperCamelCase ): return ( ("generating PyTorch" in line and not output_pytorch) or ("generating TensorFlow" in line and not output_tensorflow) or ("generating Flax" in line and not output_flax) ) def replace_in_files(UpperCamelCase ): with open(UpperCamelCase ) as datafile: snake_case__ :int = [] snake_case__ :Optional[int] = False snake_case__ :List[str] = False for line in datafile: if "# To replace in: " in line and "##" not in line: snake_case__ :Optional[Any] = line.split("\"" )[1] snake_case__ :Tuple = skip_units(UpperCamelCase ) elif "# Below: " in line and "##" not in line: snake_case__ :Optional[Any] = line.split("\"" )[1] snake_case__ :List[str] = skip_units(UpperCamelCase ) elif "# End." in line and "##" not in line: if not skip_file and not skip_snippet: replace(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) snake_case__ :Tuple = [] elif "# Replace with" in line and "##" not in line: snake_case__ :Optional[Any] = [] elif "##" not in line: lines_to_copy.append(UpperCamelCase ) remove(UpperCamelCase ) replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py' ) os.rmdir(UpperCamelCase )
57
1
from math import pi, sqrt def lowercase_ ( __snake_case : float ) -> float: '''simple docstring''' if num <= 0: raise ValueError("math domain error" ) if num > 1_7_1.5: raise OverflowError("math range error" ) elif num - int(__snake_case ) not in (0, 0.5): raise NotImplementedError("num must be an integer or a half-integer" ) elif num == 0.5: return sqrt(__snake_case ) else: return 1.0 if num == 1 else (num - 1) * gamma(num - 1 ) def lowercase_ ( ) -> None: '''simple docstring''' assert gamma(0.5 ) == sqrt(__snake_case ) assert gamma(1 ) == 1.0 assert gamma(2 ) == 1.0 if __name__ == "__main__": from doctest import testmod testmod() __UpperCAmelCase : str = 1.0 while num: __UpperCAmelCase : List[str] = float(input("Gamma of: ")) print(F'''gamma({num}) = {gamma(num)}''') print("\nEnter 0 to exit...")
57
from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_herbert import HerbertTokenizer __UpperCAmelCase : str = logging.get_logger(__name__) __UpperCAmelCase : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} __UpperCAmelCase : List[Any] = { "vocab_file": { "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json" }, "merges_file": { "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt" }, } __UpperCAmelCase : str = {"allegro/herbert-base-cased": 5_1_4} __UpperCAmelCase : List[str] = {} class _snake_case ( _A ): _A = VOCAB_FILES_NAMES _A = PRETRAINED_VOCAB_FILES_MAP _A = PRETRAINED_INIT_CONFIGURATION _A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A = HerbertTokenizer def __init__( self ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase="<s>" ,UpperCamelCase="<unk>" ,UpperCamelCase="<pad>" ,UpperCamelCase="<mask>" ,UpperCamelCase="</s>" ,**UpperCamelCase ,) -> Dict: super().__init__( UpperCamelCase ,UpperCamelCase ,tokenizer_file=UpperCamelCase ,cls_token=UpperCamelCase ,unk_token=UpperCamelCase ,pad_token=UpperCamelCase ,mask_token=UpperCamelCase ,sep_token=UpperCamelCase ,**UpperCamelCase ,) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> List[int]: snake_case__ :Optional[int] = [self.cls_token_id] snake_case__ :Any = [self.sep_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase ,token_ids_a=UpperCamelCase ,already_has_special_tokens=UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase )) + [1] return [1] + ([0] * len(UpperCamelCase )) + [1] + ([0] * len(UpperCamelCase )) + [1] def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> List[int]: snake_case__ :Any = [self.sep_token_id] snake_case__ :Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> Tuple[str]: snake_case__ :List[str] = self._tokenizer.model.save(UpperCamelCase ,name=UpperCamelCase ) return tuple(UpperCamelCase )
57
1
import itertools from dataclasses import dataclass from typing import Optional import pandas as pd import pyarrow as pa import datasets from datasets.table import table_cast @dataclass class _snake_case ( datasets.BuilderConfig ): _A = None class _snake_case ( datasets.ArrowBasedBuilder ): _A = PandasConfig def lowerCAmelCase_ ( self ) -> str: return datasets.DatasetInfo(features=self.config.features ) def lowerCAmelCase_ ( self ,UpperCamelCase ) -> List[str]: if not self.config.data_files: raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' ) snake_case__ :Any = dl_manager.download_and_extract(self.config.data_files ) if isinstance(UpperCamelCase ,(str, list, tuple) ): snake_case__ :List[Any] = data_files if isinstance(UpperCamelCase ,UpperCamelCase ): snake_case__ :Any = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive snake_case__ :int = [dl_manager.iter_files(UpperCamelCase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={"files": files} )] snake_case__ :Tuple = [] for split_name, files in data_files.items(): if isinstance(UpperCamelCase ,UpperCamelCase ): snake_case__ :List[Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive snake_case__ :Optional[Any] = [dl_manager.iter_files(UpperCamelCase ) for file in files] splits.append(datasets.SplitGenerator(name=UpperCamelCase ,gen_kwargs={"files": files} ) ) return splits def lowerCAmelCase_ ( self ,UpperCamelCase ) -> pa.Table: if self.config.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example snake_case__ :Any = table_cast(UpperCamelCase ,self.config.features.arrow_schema ) return pa_table def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Optional[Any]: for i, file in enumerate(itertools.chain.from_iterable(UpperCamelCase ) ): with open(UpperCamelCase ,"rb" ) as f: snake_case__ :Optional[int] = pa.Table.from_pandas(pd.read_pickle(UpperCamelCase ) ) yield i, self._cast_table(UpperCamelCase )
57
def lowercase_ ( __snake_case : int ) -> bool: '''simple docstring''' if p < 2: raise ValueError("p should not be less than 2!" ) elif p == 2: return True snake_case__ :List[str] = 4 snake_case__ :Optional[int] = (1 << p) - 1 for _ in range(p - 2 ): snake_case__ :List[Any] = ((s * s) - 2) % m return s == 0 if __name__ == "__main__": print(lucas_lehmer_test(7)) print(lucas_lehmer_test(1_1))
57
1
from typing import Any def lowercase_ ( __snake_case : list , __snake_case : list , __snake_case : dict , __snake_case : dict , __snake_case : dict , ) -> list: '''simple docstring''' _validation( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) # Creates data structures and fill initial step snake_case__ :dict = {} snake_case__ :dict = {} for state in states_space: snake_case__ :List[Any] = observations_space[0] snake_case__ :str = ( initial_probabilities[state] * emission_probabilities[state][observation] ) snake_case__ :str = None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 , len(__snake_case ) ): snake_case__ :Any = observations_space[o] snake_case__ :Tuple = observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function snake_case__ :Tuple = "" snake_case__ :Union[str, Any] = -1 for k_state in states_space: snake_case__ :int = ( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: snake_case__ :str = probability snake_case__ :Tuple = k_state # Update probabilities and pointers dicts snake_case__ :List[str] = ( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) snake_case__ :List[str] = arg_max # The final observation snake_case__ :str = observations_space[len(__snake_case ) - 1] # argmax for given final observation snake_case__ :Optional[int] = "" snake_case__ :List[str] = -1 for k_state in states_space: snake_case__ :List[str] = probabilities[(k_state, final_observation)] if probability > max_probability: snake_case__ :List[str] = probability snake_case__ :int = k_state snake_case__ :Any = arg_max # Process pointers backwards snake_case__ :int = last_state snake_case__ :List[str] = [] for o in range(len(__snake_case ) - 1 , -1 , -1 ): result.append(__snake_case ) snake_case__ :List[str] = pointers[previous, observations_space[o]] result.reverse() return result def lowercase_ ( __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , ) -> None: '''simple docstring''' _validate_not_empty( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) _validate_lists(__snake_case , __snake_case ) _validate_dicts( __snake_case , __snake_case , __snake_case ) def lowercase_ ( __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , ) -> None: '''simple docstring''' if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError("There's an empty parameter" ) def lowercase_ ( __snake_case : Any , __snake_case : Any ) -> None: '''simple docstring''' _validate_list(__snake_case , "observations_space" ) _validate_list(__snake_case , "states_space" ) def lowercase_ ( __snake_case : Any , __snake_case : str ) -> None: '''simple docstring''' if not isinstance(_object , __snake_case ): snake_case__ :Optional[int] = F'{var_name} must be a list' raise ValueError(__snake_case ) else: for x in _object: if not isinstance(__snake_case , __snake_case ): snake_case__ :Any = F'{var_name} must be a list of strings' raise ValueError(__snake_case ) def lowercase_ ( __snake_case : Any , __snake_case : Any , __snake_case : Any , ) -> None: '''simple docstring''' _validate_dict(__snake_case , "initial_probabilities" , __snake_case ) _validate_nested_dict(__snake_case , "transition_probabilities" ) _validate_nested_dict(__snake_case , "emission_probabilities" ) def lowercase_ ( __snake_case : Any , __snake_case : str ) -> None: '''simple docstring''' _validate_dict(_object , __snake_case , __snake_case ) for x in _object.values(): _validate_dict(__snake_case , __snake_case , __snake_case , __snake_case ) def lowercase_ ( __snake_case : Any , __snake_case : str , __snake_case : type , __snake_case : bool = False ) -> None: '''simple docstring''' if not isinstance(_object , __snake_case ): snake_case__ :str = F'{var_name} must be a dict' raise ValueError(__snake_case ) if not all(isinstance(__snake_case , __snake_case ) for x in _object ): snake_case__ :List[Any] = F'{var_name} all keys must be strings' raise ValueError(__snake_case ) if not all(isinstance(__snake_case , __snake_case ) for x in _object.values() ): snake_case__ :Optional[int] = "nested dictionary " if nested else "" snake_case__ :int = F'{var_name} {nested_text}all values must be {value_type.__name__}' raise ValueError(__snake_case ) if __name__ == "__main__": from doctest import testmod testmod()
57
from typing import Any def lowercase_ ( __snake_case : list , __snake_case : list , __snake_case : dict , __snake_case : dict , __snake_case : dict , ) -> list: '''simple docstring''' _validation( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) # Creates data structures and fill initial step snake_case__ :dict = {} snake_case__ :dict = {} for state in states_space: snake_case__ :List[Any] = observations_space[0] snake_case__ :str = ( initial_probabilities[state] * emission_probabilities[state][observation] ) snake_case__ :str = None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 , len(__snake_case ) ): snake_case__ :Any = observations_space[o] snake_case__ :Tuple = observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function snake_case__ :Tuple = "" snake_case__ :Union[str, Any] = -1 for k_state in states_space: snake_case__ :int = ( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: snake_case__ :str = probability snake_case__ :Tuple = k_state # Update probabilities and pointers dicts snake_case__ :List[str] = ( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) snake_case__ :List[str] = arg_max # The final observation snake_case__ :str = observations_space[len(__snake_case ) - 1] # argmax for given final observation snake_case__ :Optional[int] = "" snake_case__ :List[str] = -1 for k_state in states_space: snake_case__ :List[str] = probabilities[(k_state, final_observation)] if probability > max_probability: snake_case__ :List[str] = probability snake_case__ :int = k_state snake_case__ :Any = arg_max # Process pointers backwards snake_case__ :int = last_state snake_case__ :List[str] = [] for o in range(len(__snake_case ) - 1 , -1 , -1 ): result.append(__snake_case ) snake_case__ :List[str] = pointers[previous, observations_space[o]] result.reverse() return result def lowercase_ ( __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , ) -> None: '''simple docstring''' _validate_not_empty( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) _validate_lists(__snake_case , __snake_case ) _validate_dicts( __snake_case , __snake_case , __snake_case ) def lowercase_ ( __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , ) -> None: '''simple docstring''' if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError("There's an empty parameter" ) def lowercase_ ( __snake_case : Any , __snake_case : Any ) -> None: '''simple docstring''' _validate_list(__snake_case , "observations_space" ) _validate_list(__snake_case , "states_space" ) def lowercase_ ( __snake_case : Any , __snake_case : str ) -> None: '''simple docstring''' if not isinstance(_object , __snake_case ): snake_case__ :Optional[int] = F'{var_name} must be a list' raise ValueError(__snake_case ) else: for x in _object: if not isinstance(__snake_case , __snake_case ): snake_case__ :Any = F'{var_name} must be a list of strings' raise ValueError(__snake_case ) def lowercase_ ( __snake_case : Any , __snake_case : Any , __snake_case : Any , ) -> None: '''simple docstring''' _validate_dict(__snake_case , "initial_probabilities" , __snake_case ) _validate_nested_dict(__snake_case , "transition_probabilities" ) _validate_nested_dict(__snake_case , "emission_probabilities" ) def lowercase_ ( __snake_case : Any , __snake_case : str ) -> None: '''simple docstring''' _validate_dict(_object , __snake_case , __snake_case ) for x in _object.values(): _validate_dict(__snake_case , __snake_case , __snake_case , __snake_case ) def lowercase_ ( __snake_case : Any , __snake_case : str , __snake_case : type , __snake_case : bool = False ) -> None: '''simple docstring''' if not isinstance(_object , __snake_case ): snake_case__ :str = F'{var_name} must be a dict' raise ValueError(__snake_case ) if not all(isinstance(__snake_case , __snake_case ) for x in _object ): snake_case__ :List[Any] = F'{var_name} all keys must be strings' raise ValueError(__snake_case ) if not all(isinstance(__snake_case , __snake_case ) for x in _object.values() ): snake_case__ :Optional[int] = "nested dictionary " if nested else "" snake_case__ :int = F'{var_name} {nested_text}all values must be {value_type.__name__}' raise ValueError(__snake_case ) if __name__ == "__main__": from doctest import testmod testmod()
57
1
from __future__ import annotations from math import pi def lowercase_ ( __snake_case : float , __snake_case : float , __snake_case : float ) -> dict[str, float]: '''simple docstring''' if (inductance, frequency, reactance).count(0 ) != 1: raise ValueError("One and only one argument must be 0" ) if inductance < 0: raise ValueError("Inductance cannot be negative" ) if frequency < 0: raise ValueError("Frequency cannot be negative" ) if reactance < 0: raise ValueError("Inductive reactance cannot be negative" ) if inductance == 0: return {"inductance": reactance / (2 * pi * frequency)} elif frequency == 0: return {"frequency": reactance / (2 * pi * inductance)} elif reactance == 0: return {"reactance": 2 * pi * frequency * inductance} else: raise ValueError("Exactly one argument must be 0" ) if __name__ == "__main__": import doctest doctest.testmod()
57
def lowercase_ ( __snake_case : str ) -> list: '''simple docstring''' return [ txt[:a] + txt[a].upper() + txt[a + 1 :] for a in range(len(__snake_case ) ) if txt[a].isalpha() ] if __name__ == "__main__": __import__("doctest").testmod()
57
1
import glob import os import random from string import ascii_lowercase, digits import cva __UpperCAmelCase : int = "" __UpperCAmelCase : Optional[int] = "" __UpperCAmelCase : str = "" __UpperCAmelCase : Optional[int] = 1 # (0 is vertical, 1 is horizontal) def lowercase_ ( ) -> None: '''simple docstring''' snake_case__ , snake_case__ :Dict = get_dataset(__snake_case , __snake_case ) print("Processing..." ) snake_case__ , snake_case__ , snake_case__ :Tuple = update_image_and_anno(__snake_case , __snake_case , __snake_case ) for index, image in enumerate(__snake_case ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' snake_case__ :str = random_chars(32 ) snake_case__ :Tuple = paths[index].split(os.sep )[-1].rsplit("." , 1 )[0] snake_case__ :List[str] = F'{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}' cva.imwrite(F'/{file_root}.jpg' , __snake_case , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(F'Success {index+1}/{len(__snake_case )} with {file_name}' ) snake_case__ :Optional[int] = [] for anno in new_annos[index]: snake_case__ :Dict = F'{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}' annos_list.append(__snake_case ) with open(F'/{file_root}.txt' , "w" ) as outfile: outfile.write("\n".join(line for line in annos_list ) ) def lowercase_ ( __snake_case : str , __snake_case : str ) -> tuple[list, list]: '''simple docstring''' snake_case__ :Optional[int] = [] snake_case__ :Dict = [] for label_file in glob.glob(os.path.join(__snake_case , "*.txt" ) ): snake_case__ :List[str] = label_file.split(os.sep )[-1].rsplit("." , 1 )[0] with open(__snake_case ) as in_file: snake_case__ :str = in_file.readlines() snake_case__ :Dict = os.path.join(__snake_case , F'{label_name}.jpg' ) snake_case__ :Optional[Any] = [] for obj_list in obj_lists: snake_case__ :Union[str, Any] = obj_list.rstrip("\n" ).split(" " ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(__snake_case ) labels.append(__snake_case ) return img_paths, labels def lowercase_ ( __snake_case : list , __snake_case : list , __snake_case : int = 1 ) -> tuple[list, list, list]: '''simple docstring''' snake_case__ :int = [] snake_case__ :List[Any] = [] snake_case__ :Optional[int] = [] for idx in range(len(__snake_case ) ): snake_case__ :Any = [] snake_case__ :Any = img_list[idx] path_list.append(__snake_case ) snake_case__ :Dict = anno_list[idx] snake_case__ :Tuple = cva.imread(__snake_case ) if flip_type == 1: snake_case__ :Dict = cva.flip(__snake_case , __snake_case ) for bbox in img_annos: snake_case__ :int = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: snake_case__ :Union[str, Any] = cva.flip(__snake_case , __snake_case ) for bbox in img_annos: snake_case__ :str = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(__snake_case ) new_imgs_list.append(__snake_case ) return new_imgs_list, new_annos_lists, path_list def lowercase_ ( __snake_case : int = 32 ) -> str: '''simple docstring''' assert number_char > 1, "The number of character should greater than 1" snake_case__ :Dict = ascii_lowercase + digits return "".join(random.choice(__snake_case ) for _ in range(__snake_case ) ) if __name__ == "__main__": main() print("DONE ✅")
57
def lowercase_ ( __snake_case : int = 10_00 ) -> int: '''simple docstring''' snake_case__ :int = 3 snake_case__ :int = 0 while a < n: if a % 3 == 0 or a % 5 == 0: result += a elif a % 15 == 0: result -= a a += 1 return result if __name__ == "__main__": print(F'''{solution() = }''')
57
1
from __future__ import annotations def lowercase_ ( __snake_case : dict , __snake_case : str ) -> set[str]: '''simple docstring''' snake_case__ , snake_case__ :Tuple = set(__snake_case ), [start] while stack: snake_case__ :Dict = stack.pop() explored.add(__snake_case ) # Differences from BFS: # 1) pop last element instead of first one # 2) add adjacent elements to stack without exploring them for adj in reversed(graph[v] ): if adj not in explored: stack.append(__snake_case ) return explored __UpperCAmelCase : Optional[int] = { "A": ["B", "C", "D"], "B": ["A", "D", "E"], "C": ["A", "F"], "D": ["B", "D"], "E": ["B", "F"], "F": ["C", "E", "G"], "G": ["F"], } if __name__ == "__main__": import doctest doctest.testmod() print(depth_first_search(G, "A"))
57
import os import sys import unittest __UpperCAmelCase : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __UpperCAmelCase : Tuple = os.path.join(git_repo_path, "src", "diffusers") class _snake_case ( unittest.TestCase ): def lowerCAmelCase_ ( self ) -> Union[str, Any]: snake_case__ :Tuple = find_backend(" if not is_torch_available():" ) self.assertEqual(UpperCamelCase ,"torch" ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") snake_case__ :Tuple = find_backend(" if not (is_torch_available() and is_transformers_available()):" ) self.assertEqual(UpperCamelCase ,"torch_and_transformers" ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") snake_case__ :str = find_backend( " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" ) self.assertEqual(UpperCamelCase ,"torch_and_transformers_and_onnx" ) def lowerCAmelCase_ ( self ) -> str: snake_case__ :int = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch" ,UpperCamelCase ) self.assertIn("torch_and_transformers" ,UpperCamelCase ) self.assertIn("flax_and_transformers" ,UpperCamelCase ) self.assertIn("torch_and_transformers_and_onnx" ,UpperCamelCase ) # Likewise, we can't assert on the exact content of a key self.assertIn("UNet2DModel" ,objects["torch"] ) self.assertIn("FlaxUNet2DConditionModel" ,objects["flax"] ) self.assertIn("StableDiffusionPipeline" ,objects["torch_and_transformers"] ) self.assertIn("FlaxStableDiffusionPipeline" ,objects["flax_and_transformers"] ) self.assertIn("LMSDiscreteScheduler" ,objects["torch_and_scipy"] ) self.assertIn("OnnxStableDiffusionPipeline" ,objects["torch_and_transformers_and_onnx"] ) def lowerCAmelCase_ ( self ) -> Any: snake_case__ :Union[str, Any] = create_dummy_object("CONSTANT" ,"'torch'" ) self.assertEqual(UpperCamelCase ,"\nCONSTANT = None\n" ) snake_case__ :Optional[Any] = create_dummy_object("function" ,"'torch'" ) self.assertEqual( UpperCamelCase ,"\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" ) snake_case__ :str = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n" snake_case__ :List[str] = create_dummy_object("FakeClass" ,"'torch'" ) self.assertEqual(UpperCamelCase ,UpperCamelCase ) def lowerCAmelCase_ ( self ) -> List[Any]: snake_case__ :Tuple = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n" snake_case__ :int = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} ) self.assertEqual(dummy_files["torch"] ,UpperCamelCase )
57
1
def lowercase_ ( __snake_case : int ) -> str: '''simple docstring''' if isinstance(__snake_case , __snake_case ): raise TypeError("'float' object cannot be interpreted as an integer" ) if isinstance(__snake_case , __snake_case ): raise TypeError("'str' object cannot be interpreted as an integer" ) if num == 0: return "0b0" snake_case__ :Optional[int] = False if num < 0: snake_case__ :Optional[int] = True snake_case__ :List[str] = -num snake_case__ :list[int] = [] while num > 0: binary.insert(0 , num % 2 ) num >>= 1 if negative: return "-0b" + "".join(str(__snake_case ) for e in binary ) return "0b" + "".join(str(__snake_case ) for e in binary ) if __name__ == "__main__": import doctest doctest.testmod()
57
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available __UpperCAmelCase : Tuple = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase : List[Any] = ["BartphoTokenizer"] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys __UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
57
1
import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() __UpperCAmelCase : int = logging.get_logger(__name__) def lowercase_ ( __snake_case : Optional[Any] ) -> List[str]: '''simple docstring''' snake_case__ :str = OrderedDict() for key, value in state_dict.items(): if key.startswith("module.encoder" ): snake_case__ :List[str] = key.replace("module.encoder" , "glpn.encoder" ) if key.startswith("module.decoder" ): snake_case__ :Dict = key.replace("module.decoder" , "decoder.stages" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 snake_case__ :List[str] = key[key.find("patch_embed" ) + len("patch_embed" )] snake_case__ :Tuple = key.replace(F'patch_embed{idx}' , F'patch_embeddings.{int(__snake_case )-1}' ) if "norm" in key: snake_case__ :Any = key.replace("norm" , "layer_norm" ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 snake_case__ :Optional[Any] = key[key.find("glpn.encoder.layer_norm" ) + len("glpn.encoder.layer_norm" )] snake_case__ :Tuple = key.replace(F'layer_norm{idx}' , F'layer_norm.{int(__snake_case )-1}' ) if "layer_norm1" in key: snake_case__ :Optional[Any] = key.replace("layer_norm1" , "layer_norm_1" ) if "layer_norm2" in key: snake_case__ :str = key.replace("layer_norm2" , "layer_norm_2" ) if "block" in key: # replace for example block1 by block.0 snake_case__ :Any = key[key.find("block" ) + len("block" )] snake_case__ :int = key.replace(F'block{idx}' , F'block.{int(__snake_case )-1}' ) if "attn.q" in key: snake_case__ :Any = key.replace("attn.q" , "attention.self.query" ) if "attn.proj" in key: snake_case__ :Optional[Any] = key.replace("attn.proj" , "attention.output.dense" ) if "attn" in key: snake_case__ :Optional[int] = key.replace("attn" , "attention.self" ) if "fc1" in key: snake_case__ :int = key.replace("fc1" , "dense1" ) if "fc2" in key: snake_case__ :Any = key.replace("fc2" , "dense2" ) if "linear_pred" in key: snake_case__ :int = key.replace("linear_pred" , "classifier" ) if "linear_fuse" in key: snake_case__ :str = key.replace("linear_fuse.conv" , "linear_fuse" ) snake_case__ :Any = key.replace("linear_fuse.bn" , "batch_norm" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 snake_case__ :Optional[Any] = key[key.find("linear_c" ) + len("linear_c" )] snake_case__ :List[Any] = key.replace(F'linear_c{idx}' , F'linear_c.{int(__snake_case )-1}' ) if "bot_conv" in key: snake_case__ :Optional[Any] = key.replace("bot_conv" , "0.convolution" ) if "skip_conv1" in key: snake_case__ :List[Any] = key.replace("skip_conv1" , "1.convolution" ) if "skip_conv2" in key: snake_case__ :int = key.replace("skip_conv2" , "2.convolution" ) if "fusion1" in key: snake_case__ :List[str] = key.replace("fusion1" , "1.fusion" ) if "fusion2" in key: snake_case__ :Optional[int] = key.replace("fusion2" , "2.fusion" ) if "fusion3" in key: snake_case__ :Union[str, Any] = key.replace("fusion3" , "3.fusion" ) if "fusion" in key and "conv" in key: snake_case__ :Union[str, Any] = key.replace("conv" , "convolutional_layer" ) if key.startswith("module.last_layer_depth" ): snake_case__ :int = key.replace("module.last_layer_depth" , "head.head" ) snake_case__ :Tuple = value return new_state_dict def lowercase_ ( __snake_case : List[str] , __snake_case : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) snake_case__ :Dict = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.weight' ) snake_case__ :List[Any] = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.bias' ) # next, add keys and values (in that order) to the state dict snake_case__ :Union[str, Any] = kv_weight[ : config.hidden_sizes[i], : ] snake_case__ :Optional[int] = kv_bias[: config.hidden_sizes[i]] snake_case__ :List[str] = kv_weight[ config.hidden_sizes[i] :, : ] snake_case__ :List[Any] = kv_bias[config.hidden_sizes[i] :] def lowercase_ ( ) -> Tuple: '''simple docstring''' snake_case__ :Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg" snake_case__ :Any = Image.open(requests.get(__snake_case , stream=__snake_case ).raw ) return image @torch.no_grad() def lowercase_ ( __snake_case : List[Any] , __snake_case : List[str] , __snake_case : Dict=False , __snake_case : Tuple=None ) -> Optional[Any]: '''simple docstring''' snake_case__ :Union[str, Any] = GLPNConfig(hidden_sizes=[64, 1_28, 3_20, 5_12] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) snake_case__ :List[Any] = GLPNImageProcessor() # prepare image snake_case__ :Dict = prepare_img() snake_case__ :Dict = image_processor(images=__snake_case , return_tensors="pt" ).pixel_values logger.info("Converting model..." ) # load original state dict snake_case__ :Optional[Any] = torch.load(__snake_case , map_location=torch.device("cpu" ) ) # rename keys snake_case__ :List[str] = rename_keys(__snake_case ) # key and value matrices need special treatment read_in_k_v(__snake_case , __snake_case ) # create HuggingFace model and load state dict snake_case__ :List[str] = GLPNForDepthEstimation(__snake_case ) model.load_state_dict(__snake_case ) model.eval() # forward pass snake_case__ :List[str] = model(__snake_case ) snake_case__ :List[str] = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: snake_case__ :Union[str, Any] = torch.tensor( [[4.4_1_4_7, 4.0_8_7_3, 4.0_6_7_3], [3.7_8_9_0, 3.2_8_8_1, 3.1_5_2_5], [3.7_6_7_4, 3.5_4_2_3, 3.4_9_1_3]] ) elif "kitti" in model_name: snake_case__ :Optional[int] = torch.tensor( [[3.4_2_9_1, 2.7_8_6_5, 2.5_1_5_1], [3.2_8_4_1, 2.7_0_2_1, 2.3_5_0_2], [3.1_1_4_7, 2.4_6_2_5, 2.2_4_8_1]] ) else: raise ValueError(F'Unknown model name: {model_name}' ) snake_case__ :str = torch.Size([1, 4_80, 6_40] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , __snake_case , atol=1e-4 ) print("Looks ok!" ) # finally, push to hub if required if push_to_hub: logger.info("Pushing model and image processor to the hub..." ) model.push_to_hub( repo_path_or_name=Path(__snake_case , __snake_case ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=__snake_case , ) image_processor.push_to_hub( repo_path_or_name=Path(__snake_case , __snake_case ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=__snake_case , ) if __name__ == "__main__": __UpperCAmelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub." ) parser.add_argument( "--model_name", default="glpn-kitti", type=str, help="Name of the model in case you're pushing to the hub.", ) __UpperCAmelCase : str = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
57
import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class _snake_case ( unittest.TestCase ): def lowerCAmelCase_ ( self ) -> List[Any]: # A mock response for an HTTP head request to emulate server down snake_case__ :Tuple = mock.Mock() snake_case__ :List[str] = 500 snake_case__ :Any = {} snake_case__ :Union[str, Any] = HTTPError snake_case__ :Tuple = {} # Download this model to make sure it's in the cache. snake_case__ :Any = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.Session.request" ,return_value=UpperCamelCase ) as mock_head: snake_case__ :Dict = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def lowerCAmelCase_ ( self ) -> Dict: # A mock response for an HTTP head request to emulate server down snake_case__ :Union[str, Any] = mock.Mock() snake_case__ :int = 500 snake_case__ :Any = {} snake_case__ :Dict = HTTPError snake_case__ :List[Any] = {} # Download this model to make sure it's in the cache. snake_case__ :Optional[int] = GPTaTokenizerFast.from_pretrained("gpt2" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.Session.request" ,return_value=UpperCamelCase ) as mock_head: snake_case__ :Any = GPTaTokenizerFast.from_pretrained("gpt2" ) # This check we did call the fake head request mock_head.assert_called() def lowerCAmelCase_ ( self ) -> int: # This test is for deprecated behavior and can be removed in v5 try: snake_case__ :Union[str, Any] = tempfile.mktemp() with open(UpperCamelCase ,"wb" ) as f: http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" ,UpperCamelCase ) snake_case__ :Tuple = AlbertTokenizer.from_pretrained(UpperCamelCase ) finally: os.remove(UpperCamelCase ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile("tokenizer.json" ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open("tokenizer.json" ,"wb" ) as f: http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" ,UpperCamelCase ) snake_case__ :Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size ,1_000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. finally: os.remove("tokenizer.json" ) def lowerCAmelCase_ ( self ) -> Union[str, Any]: # This test is for deprecated behavior and can be removed in v5 snake_case__ :Union[str, Any] = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" ) @is_staging_test class _snake_case ( unittest.TestCase ): _A = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou'] @classmethod def lowerCAmelCase_ ( cls ) -> Optional[int]: snake_case__ :List[str] = TOKEN HfFolder.save_token(UpperCamelCase ) @classmethod def lowerCAmelCase_ ( cls ) -> Union[str, Any]: try: delete_repo(token=cls._token ,repo_id="test-tokenizer" ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id="valid_org/test-tokenizer-org" ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id="test-dynamic-tokenizer" ) except HTTPError: pass def lowerCAmelCase_ ( self ) -> Optional[Any]: with tempfile.TemporaryDirectory() as tmp_dir: snake_case__ :List[str] = os.path.join(UpperCamelCase ,"vocab.txt" ) with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) snake_case__ :str = BertTokenizer(UpperCamelCase ) tokenizer.push_to_hub("test-tokenizer" ,use_auth_token=self._token ) snake_case__ :Dict = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) # Reset repo delete_repo(token=self._token ,repo_id="test-tokenizer" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase ,repo_id="test-tokenizer" ,push_to_hub=UpperCamelCase ,use_auth_token=self._token ) snake_case__ :List[str] = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) def lowerCAmelCase_ ( self ) -> Optional[int]: with tempfile.TemporaryDirectory() as tmp_dir: snake_case__ :List[Any] = os.path.join(UpperCamelCase ,"vocab.txt" ) with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) snake_case__ :Any = BertTokenizer(UpperCamelCase ) tokenizer.push_to_hub("valid_org/test-tokenizer-org" ,use_auth_token=self._token ) snake_case__ :Any = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) # Reset repo delete_repo(token=self._token ,repo_id="valid_org/test-tokenizer-org" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( UpperCamelCase ,repo_id="valid_org/test-tokenizer-org" ,push_to_hub=UpperCamelCase ,use_auth_token=self._token ) snake_case__ :Union[str, Any] = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" ) self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab ) @require_tokenizers def lowerCAmelCase_ ( self ) -> Any: CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: snake_case__ :str = os.path.join(UpperCamelCase ,"vocab.txt" ) with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) snake_case__ :Optional[int] = CustomTokenizer(UpperCamelCase ) # No fast custom tokenizer tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token ) snake_case__ :Union[str, Any] = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=UpperCamelCase ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: snake_case__ :int = os.path.join(UpperCamelCase ,"vocab.txt" ) with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) snake_case__ :Tuple = BertTokenizerFast.from_pretrained(UpperCamelCase ) bert_tokenizer.save_pretrained(UpperCamelCase ) snake_case__ :List[Any] = CustomTokenizerFast.from_pretrained(UpperCamelCase ) tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token ) snake_case__ :List[Any] = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=UpperCamelCase ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizerFast" ) snake_case__ :List[str] = AutoTokenizer.from_pretrained( f'{USER}/test-dynamic-tokenizer' ,use_fast=UpperCamelCase ,trust_remote_code=UpperCamelCase ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" ) class _snake_case ( unittest.TestCase ): def lowerCAmelCase_ ( self ) -> List[Any]: snake_case__ :int = Trie() trie.add("Hello 友達" ) self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} ) trie.add("Hello" ) trie.data self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} ) def lowerCAmelCase_ ( self ) -> int: snake_case__ :List[str] = Trie() self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS] This is a extra_id_100"] ) trie.add("[CLS]" ) trie.add("extra_id_1" ) trie.add("extra_id_100" ) self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS]", " This is a ", "extra_id_100"] ) def lowerCAmelCase_ ( self ) -> str: snake_case__ :Optional[Any] = Trie() trie.add("A" ) self.assertEqual(trie.split("ABC" ) ,["A", "BC"] ) self.assertEqual(trie.split("BCA" ) ,["BC", "A"] ) def lowerCAmelCase_ ( self ) -> Dict: snake_case__ :Any = Trie() trie.add("TOKEN]" ) trie.add("[SPECIAL_TOKEN]" ) self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] ) def lowerCAmelCase_ ( self ) -> Tuple: snake_case__ :List[Any] = Trie() trie.add("A" ) trie.add("P" ) trie.add("[SPECIAL_TOKEN]" ) self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] ) def lowerCAmelCase_ ( self ) -> Tuple: snake_case__ :str = Trie() trie.add("AB" ) trie.add("B" ) trie.add("C" ) self.assertEqual(trie.split("ABC" ) ,["AB", "C"] ) def lowerCAmelCase_ ( self ) -> Union[str, Any]: snake_case__ :Dict = Trie() trie.add("ABC" ) trie.add("B" ) trie.add("CD" ) self.assertEqual(trie.split("ABCD" ) ,["ABC", "D"] ) def lowerCAmelCase_ ( self ) -> int: # Even if the offsets are wrong, we necessarily output correct string # parts. snake_case__ :Optional[int] = Trie() snake_case__ :Union[str, Any] = trie.cut_text("ABC" ,[0, 0, 2, 1, 2, 3] ) self.assertEqual(UpperCamelCase ,["AB", "C"] )
57
1
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __UpperCAmelCase : Optional[int] = 1_6 __UpperCAmelCase : List[Any] = 3_2 def lowercase_ ( __snake_case : Accelerator , __snake_case : int = 16 , __snake_case : str = "bert-base-cased" ) -> Optional[Any]: '''simple docstring''' snake_case__ :List[str] = AutoTokenizer.from_pretrained(__snake_case ) snake_case__ :str = load_dataset("glue" , "mrpc" ) def tokenize_function(__snake_case : Tuple ): # max_length=None => use the model max length (it's actually the default) snake_case__ :Optional[Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__snake_case , max_length=__snake_case ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset snake_case__ :int = datasets.map( __snake_case , batched=__snake_case , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=__snake_case ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library snake_case__ :Optional[int] = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(__snake_case : List[str] ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__snake_case , padding="max_length" , max_length=1_28 , return_tensors="pt" ) return tokenizer.pad(__snake_case , padding="longest" , return_tensors="pt" ) # Instantiate dataloaders. snake_case__ :str = DataLoader( tokenized_datasets["train"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case ) snake_case__ :List[Any] = DataLoader( tokenized_datasets["validation"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case ) return train_dataloader, eval_dataloader def lowercase_ ( __snake_case : Optional[Any] , __snake_case : Any ) -> int: '''simple docstring''' snake_case__ :int = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs snake_case__ :int = config["lr"] snake_case__ :int = int(config["num_epochs"] ) snake_case__ :List[Any] = int(config["seed"] ) snake_case__ :List[str] = int(config["batch_size"] ) snake_case__ :List[Any] = args.model_name_or_path set_seed(__snake_case ) snake_case__ , snake_case__ :Dict = get_dataloaders(__snake_case , __snake_case , __snake_case ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) snake_case__ :Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(__snake_case , return_dict=__snake_case ) # Instantiate optimizer snake_case__ :List[str] = ( AdamW if accelerator.state.deepspeed_plugin is None or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) snake_case__ :List[Any] = optimizer_cls(params=model.parameters() , lr=__snake_case ) if accelerator.state.deepspeed_plugin is not None: snake_case__ :Tuple = accelerator.state.deepspeed_plugin.deepspeed_config[ "gradient_accumulation_steps" ] else: snake_case__ :Optional[Any] = 1 snake_case__ :Optional[Any] = (len(__snake_case ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): snake_case__ :Dict = get_linear_schedule_with_warmup( optimizer=__snake_case , num_warmup_steps=0 , num_training_steps=__snake_case , ) else: snake_case__ :List[Any] = DummyScheduler(__snake_case , total_num_steps=__snake_case , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ :List[str] = accelerator.prepare( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) # We need to keep track of how many total steps we have iterated over snake_case__ :List[str] = 0 # We also need to keep track of the stating epoch so files are named properly snake_case__ :Optional[int] = 0 # Now we train the model snake_case__ :List[Any] = evaluate.load("glue" , "mrpc" ) snake_case__ :Tuple = 0 snake_case__ :List[str] = {} for epoch in range(__snake_case , __snake_case ): model.train() for step, batch in enumerate(__snake_case ): snake_case__ :int = model(**__snake_case ) snake_case__ :List[Any] = outputs.loss snake_case__ :Tuple = loss / gradient_accumulation_steps accelerator.backward(__snake_case ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() snake_case__ :Optional[int] = 0 for step, batch in enumerate(__snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): snake_case__ :Optional[int] = model(**__snake_case ) snake_case__ :Optional[int] = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times snake_case__ , snake_case__ :Any = accelerator.gather( (predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(__snake_case ) - 1: snake_case__ :Optional[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen] snake_case__ :int = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=__snake_case , references=__snake_case , ) snake_case__ :int = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'epoch {epoch}:' , __snake_case ) snake_case__ :List[Any] = eval_metric["accuracy"] if best_performance < eval_metric["accuracy"]: snake_case__ :List[str] = eval_metric["accuracy"] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), F'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f: json.dump(__snake_case , __snake_case ) def lowercase_ ( ) -> List[str]: '''simple docstring''' snake_case__ :Union[str, Any] = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." ) parser.add_argument( "--model_name_or_path" , type=__snake_case , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__snake_case , ) parser.add_argument( "--output_dir" , type=__snake_case , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , ) parser.add_argument( "--performance_lower_bound" , type=__snake_case , default=__snake_case , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , ) parser.add_argument( "--num_epochs" , type=__snake_case , default=3 , help="Number of train epochs." , ) snake_case__ :int = parser.parse_args() snake_case__ :Optional[int] = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} training_function(__snake_case , __snake_case ) if __name__ == "__main__": main()
57
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __UpperCAmelCase : Optional[Any] = 1_6 __UpperCAmelCase : Optional[int] = 3_2 def lowercase_ ( __snake_case : Accelerator , __snake_case : int = 16 , __snake_case : str = "bert-base-cased" ) -> Optional[Any]: '''simple docstring''' snake_case__ :int = AutoTokenizer.from_pretrained(__snake_case ) snake_case__ :Optional[int] = load_dataset("glue" , "mrpc" ) def tokenize_function(__snake_case : Tuple ): # max_length=None => use the model max length (it's actually the default) snake_case__ :Any = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__snake_case , max_length=__snake_case ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset snake_case__ :List[Any] = datasets.map( __snake_case , batched=__snake_case , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=__snake_case ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library snake_case__ :Any = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(__snake_case : Dict ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__snake_case , padding="max_length" , max_length=1_28 , return_tensors="pt" ) return tokenizer.pad(__snake_case , padding="longest" , return_tensors="pt" ) # Instantiate dataloaders. snake_case__ :Any = DataLoader( tokenized_datasets["train"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case ) snake_case__ :Tuple = DataLoader( tokenized_datasets["validation"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case ) return train_dataloader, eval_dataloader def lowercase_ ( __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : Optional[int] ) -> Tuple: '''simple docstring''' model.eval() snake_case__ :Union[str, Any] = 0 for step, batch in enumerate(__snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): snake_case__ :List[Any] = model(**__snake_case ) snake_case__ :Any = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times snake_case__ , snake_case__ :Tuple = accelerator.gather( (predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(__snake_case ) - 1: snake_case__ :List[str] = predictions[: len(eval_dataloader.dataset ) - samples_seen] snake_case__ :Optional[int] = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=__snake_case , references=__snake_case , ) snake_case__ :int = metric.compute() return eval_metric["accuracy"] def lowercase_ ( __snake_case : Union[str, Any] , __snake_case : Optional[Any] ) -> Any: '''simple docstring''' snake_case__ :Any = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs snake_case__ :Union[str, Any] = config["lr"] snake_case__ :List[str] = int(config["num_epochs"] ) snake_case__ :Optional[Any] = int(config["seed"] ) snake_case__ :List[Any] = int(config["batch_size"] ) snake_case__ :List[Any] = args.model_name_or_path set_seed(__snake_case ) snake_case__ , snake_case__ :List[Any] = get_dataloaders(__snake_case , __snake_case , __snake_case ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) snake_case__ :List[Any] = AutoModelForSequenceClassification.from_pretrained(__snake_case , return_dict=__snake_case ) # Instantiate optimizer snake_case__ :int = ( AdamW if accelerator.state.deepspeed_plugin is None or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) snake_case__ :Tuple = optimizer_cls(params=model.parameters() , lr=__snake_case ) if accelerator.state.deepspeed_plugin is not None: snake_case__ :List[str] = accelerator.state.deepspeed_plugin.deepspeed_config[ "gradient_accumulation_steps" ] else: snake_case__ :Any = 1 snake_case__ :List[Any] = (len(__snake_case ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): snake_case__ :Optional[Any] = get_linear_schedule_with_warmup( optimizer=__snake_case , num_warmup_steps=0 , num_training_steps=__snake_case , ) else: snake_case__ :Any = DummyScheduler(__snake_case , total_num_steps=__snake_case , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ :int = accelerator.prepare( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) # We need to keep track of how many total steps we have iterated over snake_case__ :Dict = 0 # We also need to keep track of the stating epoch so files are named properly snake_case__ :Union[str, Any] = 0 snake_case__ :List[str] = evaluate.load("glue" , "mrpc" ) snake_case__ :Optional[Any] = num_epochs if args.partial_train_epoch is not None: snake_case__ :List[Any] = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) snake_case__ :Union[str, Any] = args.resume_from_checkpoint.split("epoch_" )[1] snake_case__ :Dict = "" for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break snake_case__ :str = int(__snake_case ) + 1 snake_case__ :List[Any] = evaluation_loop(__snake_case , __snake_case , __snake_case , __snake_case ) accelerator.print("resumed checkpoint performance:" , __snake_case ) accelerator.print("resumed checkpoint's scheduler's lr:" , lr_scheduler.get_lr()[0] ) accelerator.print("resumed optimizers's lr:" , optimizer.param_groups[0]["lr"] ) with open(os.path.join(args.output_dir , F'state_{starting_epoch-1}.json' ) , "r" ) as f: snake_case__ :Tuple = json.load(__snake_case ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model snake_case__ :Optional[int] = {} for epoch in range(__snake_case , __snake_case ): model.train() for step, batch in enumerate(__snake_case ): snake_case__ :str = model(**__snake_case ) snake_case__ :List[str] = outputs.loss snake_case__ :List[Any] = loss / gradient_accumulation_steps accelerator.backward(__snake_case ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 snake_case__ :int = F'epoch_{epoch}' snake_case__ :str = os.path.join(args.output_dir , __snake_case ) accelerator.save_state(__snake_case ) snake_case__ :Union[str, Any] = evaluation_loop(__snake_case , __snake_case , __snake_case , __snake_case ) snake_case__ :List[str] = accuracy snake_case__ :List[str] = lr_scheduler.get_lr()[0] snake_case__ :List[Any] = optimizer.param_groups[0]["lr"] snake_case__ :Dict = epoch snake_case__ :List[Any] = overall_step accelerator.print(F'epoch {epoch}:' , __snake_case ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , F'state_{epoch}.json' ) , "w" ) as f: json.dump(__snake_case , __snake_case ) def lowercase_ ( ) -> Any: '''simple docstring''' snake_case__ :List[Any] = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." ) parser.add_argument( "--model_name_or_path" , type=__snake_case , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__snake_case , ) parser.add_argument( "--output_dir" , type=__snake_case , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , ) parser.add_argument( "--resume_from_checkpoint" , type=__snake_case , default=__snake_case , help="If the training should continue from a checkpoint folder." , ) parser.add_argument( "--partial_train_epoch" , type=__snake_case , default=__snake_case , help="If passed, the training will stop after this number of epochs." , ) parser.add_argument( "--num_epochs" , type=__snake_case , default=2 , help="Number of train epochs." , ) snake_case__ :Any = parser.parse_args() snake_case__ :int = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} training_function(__snake_case , __snake_case ) if __name__ == "__main__": main()
57
1
import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor __UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) class _snake_case ( _A ): def __init__( self ,*UpperCamelCase ,**UpperCamelCase ) -> None: warnings.warn( "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use CLIPImageProcessor instead." ,UpperCamelCase ,) super().__init__(*UpperCamelCase ,**UpperCamelCase )
57
from __future__ import annotations class _snake_case : def __init__( self ,UpperCamelCase ) -> None: snake_case__ :Union[str, Any] = data snake_case__ :Node | None = None snake_case__ :Node | None = None def lowercase_ ( __snake_case : Node | None ) -> None: # In Order traversal of the tree '''simple docstring''' if tree: display(tree.left ) print(tree.data ) display(tree.right ) def lowercase_ ( __snake_case : Node | None ) -> int: '''simple docstring''' return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0 def lowercase_ ( __snake_case : Node ) -> bool: '''simple docstring''' if not tree: return True if tree.left and tree.right: return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right ) else: return not tree.left and not tree.right def lowercase_ ( ) -> None: # Main function for testing. '''simple docstring''' snake_case__ :Dict = Node(1 ) snake_case__ :int = Node(2 ) snake_case__ :Optional[Any] = Node(3 ) snake_case__ :Tuple = Node(4 ) snake_case__ :str = Node(5 ) snake_case__ :Optional[Any] = Node(6 ) snake_case__ :List[Any] = Node(7 ) snake_case__ :List[str] = Node(8 ) snake_case__ :Tuple = Node(9 ) print(is_full_binary_tree(__snake_case ) ) print(depth_of_tree(__snake_case ) ) print("Tree is: " ) display(__snake_case ) if __name__ == "__main__": main()
57
1
from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def lowercase_ ( __snake_case : str ) -> None: '''simple docstring''' snake_case__ , snake_case__ :List[Any] = analyze_text(__snake_case ) snake_case__ :List[str] = list(" " + ascii_lowercase ) # what is our total sum of probabilities. snake_case__ :int = sum(single_char_strings.values() ) # one length string snake_case__ :Optional[int] = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: snake_case__ :Union[str, Any] = single_char_strings[ch] snake_case__ :Dict = my_str / all_sum my_fir_sum += prob * math.loga(__snake_case ) # entropy formula. # print entropy print(F'{round(-1 * my_fir_sum ):.1f}' ) # two len string snake_case__ :int = sum(two_char_strings.values() ) snake_case__ :List[Any] = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: snake_case__ :str = cha + cha if sequence in two_char_strings: snake_case__ :Dict = two_char_strings[sequence] snake_case__ :Any = int(__snake_case ) / all_sum my_sec_sum += prob * math.loga(__snake_case ) # print second entropy print(F'{round(-1 * my_sec_sum ):.1f}' ) # print the difference between them print(F'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' ) def lowercase_ ( __snake_case : str ) -> tuple[dict, dict]: '''simple docstring''' snake_case__ :List[str] = Counter() # type: ignore snake_case__ :List[str] = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0 , len(__snake_case ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def lowercase_ ( ) -> str: '''simple docstring''' import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
57
import os try: from .build_directory_md import good_file_paths except ImportError: from build_directory_md import good_file_paths # type: ignore __UpperCAmelCase : List[Any] = list(good_file_paths()) assert filepaths, "good_file_paths() failed!" __UpperCAmelCase : int = [file for file in filepaths if file != file.lower()] if upper_files: print(F'''{len(upper_files)} files contain uppercase characters:''') print("\n".join(upper_files) + "\n") __UpperCAmelCase : Any = [file for file in filepaths if " " in file] if space_files: print(F'''{len(space_files)} files contain space characters:''') print("\n".join(space_files) + "\n") __UpperCAmelCase : str = [file for file in filepaths if "-" in file] if hyphen_files: print(F'''{len(hyphen_files)} files contain hyphen characters:''') print("\n".join(hyphen_files) + "\n") __UpperCAmelCase : Dict = [file for file in filepaths if os.sep not in file] if nodir_files: print(F'''{len(nodir_files)} files are not in a directory:''') print("\n".join(nodir_files) + "\n") __UpperCAmelCase : int = len(upper_files + space_files + hyphen_files + nodir_files) if bad_files: import sys sys.exit(bad_files)
57
1
import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def lowercase_ ( __snake_case : str , __snake_case : str , __snake_case : str ) -> Union[str, Any]: '''simple docstring''' def get_masked_lm_array(__snake_case : str ): snake_case__ :Tuple = F'masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE' snake_case__ :Tuple = tf.train.load_variable(__snake_case , __snake_case ) if "kernel" in name: snake_case__ :List[str] = array.transpose() return torch.from_numpy(__snake_case ) def get_encoder_array(__snake_case : str ): snake_case__ :Optional[Any] = F'encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE' snake_case__ :Union[str, Any] = tf.train.load_variable(__snake_case , __snake_case ) if "kernel" in name: snake_case__ :Optional[int] = array.transpose() return torch.from_numpy(__snake_case ) def get_encoder_layer_array(__snake_case : int , __snake_case : str ): snake_case__ :Dict = F'encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE' snake_case__ :Optional[int] = tf.train.load_variable(__snake_case , __snake_case ) if "kernel" in name: snake_case__ :Any = array.transpose() return torch.from_numpy(__snake_case ) def get_encoder_attention_layer_array(__snake_case : int , __snake_case : str , __snake_case : Dict ): snake_case__ :List[Any] = F'encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE' snake_case__ :Optional[Any] = tf.train.load_variable(__snake_case , __snake_case ) snake_case__ :Union[str, Any] = array.reshape(__snake_case ) if "kernel" in name: snake_case__ :Optional[Any] = array.transpose() return torch.from_numpy(__snake_case ) print(F'Loading model based on config from {config_path}...' ) snake_case__ :Optional[Any] = BertConfig.from_json_file(__snake_case ) snake_case__ :Any = BertForMaskedLM(__snake_case ) # Layers for layer_index in range(0 , config.num_hidden_layers ): snake_case__ :BertLayer = model.bert.encoder.layer[layer_index] # Self-attention snake_case__ :BertSelfAttention = layer.attention.self snake_case__ :Union[str, Any] = get_encoder_attention_layer_array( __snake_case , "_query_dense/kernel" , self_attn.query.weight.data.shape ) snake_case__ :Any = get_encoder_attention_layer_array( __snake_case , "_query_dense/bias" , self_attn.query.bias.data.shape ) snake_case__ :Optional[Any] = get_encoder_attention_layer_array( __snake_case , "_key_dense/kernel" , self_attn.key.weight.data.shape ) snake_case__ :Tuple = get_encoder_attention_layer_array( __snake_case , "_key_dense/bias" , self_attn.key.bias.data.shape ) snake_case__ :Any = get_encoder_attention_layer_array( __snake_case , "_value_dense/kernel" , self_attn.value.weight.data.shape ) snake_case__ :Optional[Any] = get_encoder_attention_layer_array( __snake_case , "_value_dense/bias" , self_attn.value.bias.data.shape ) # Self-attention Output snake_case__ :BertSelfOutput = layer.attention.output snake_case__ :Optional[Any] = get_encoder_attention_layer_array( __snake_case , "_output_dense/kernel" , self_output.dense.weight.data.shape ) snake_case__ :Tuple = get_encoder_attention_layer_array( __snake_case , "_output_dense/bias" , self_output.dense.bias.data.shape ) snake_case__ :Any = get_encoder_layer_array(__snake_case , "_attention_layer_norm/gamma" ) snake_case__ :Dict = get_encoder_layer_array(__snake_case , "_attention_layer_norm/beta" ) # Intermediate snake_case__ :BertIntermediate = layer.intermediate snake_case__ :Optional[int] = get_encoder_layer_array(__snake_case , "_intermediate_dense/kernel" ) snake_case__ :Union[str, Any] = get_encoder_layer_array(__snake_case , "_intermediate_dense/bias" ) # Output snake_case__ :BertOutput = layer.output snake_case__ :Any = get_encoder_layer_array(__snake_case , "_output_dense/kernel" ) snake_case__ :Any = get_encoder_layer_array(__snake_case , "_output_dense/bias" ) snake_case__ :Any = get_encoder_layer_array(__snake_case , "_output_layer_norm/gamma" ) snake_case__ :Any = get_encoder_layer_array(__snake_case , "_output_layer_norm/beta" ) # Embeddings snake_case__ :Dict = get_encoder_array("_position_embedding_layer/embeddings" ) snake_case__ :Any = get_encoder_array("_type_embedding_layer/embeddings" ) snake_case__ :Dict = get_encoder_array("_embedding_norm_layer/gamma" ) snake_case__ :List[str] = get_encoder_array("_embedding_norm_layer/beta" ) # LM Head snake_case__ :Any = model.cls.predictions.transform snake_case__ :List[str] = get_masked_lm_array("dense/kernel" ) snake_case__ :Optional[int] = get_masked_lm_array("dense/bias" ) snake_case__ :List[Any] = get_masked_lm_array("layer_norm/gamma" ) snake_case__ :Optional[Any] = get_masked_lm_array("layer_norm/beta" ) snake_case__ :Optional[Any] = get_masked_lm_array("embedding_table" ) # Pooling snake_case__ :Union[str, Any] = BertPooler(config=__snake_case ) snake_case__ :BertPooler = get_encoder_array("_pooler_layer/kernel" ) snake_case__ :BertPooler = get_encoder_array("_pooler_layer/bias" ) # Export final model model.save_pretrained(__snake_case ) # Integration test - should load without any errors ;) snake_case__ :str = BertForMaskedLM.from_pretrained(__snake_case ) print(new_model.eval() ) print("Model conversion was done sucessfully!" ) if __name__ == "__main__": __UpperCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument( "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path." ) parser.add_argument( "--bert_config_file", type=str, required=True, help="The config json file corresponding to the BERT model. This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", type=str, required=True, help="Path to the output PyTorch model.", ) __UpperCAmelCase : List[str] = parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
57
def lowercase_ ( __snake_case : Tuple , __snake_case : Optional[int] ) -> List[Any]: '''simple docstring''' snake_case__ :Dict = "" for i in table: res += inp[i - 1] return res def lowercase_ ( __snake_case : List[str] ) -> int: '''simple docstring''' return data[1:] + data[0] def lowercase_ ( __snake_case : int , __snake_case : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' snake_case__ :Union[str, Any] = "" for i in range(len(__snake_case ) ): if a[i] == b[i]: res += "0" else: res += "1" return res def lowercase_ ( __snake_case : Optional[int] , __snake_case : Dict ) -> Union[str, Any]: '''simple docstring''' snake_case__ :int = int("0b" + data[0] + data[-1] , 2 ) snake_case__ :Union[str, Any] = int("0b" + data[1:3] , 2 ) return bin(s[row][col] )[2:] def lowercase_ ( __snake_case : Dict , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : List[Any] , __snake_case : Optional[int] ) -> List[str]: '''simple docstring''' snake_case__ :Tuple = message[:4] snake_case__ :int = message[4:] snake_case__ :int = apply_table(__snake_case , __snake_case ) snake_case__ :Union[str, Any] = xor(__snake_case , __snake_case ) snake_case__ :Tuple = apply_sbox(__snake_case , temp[:4] ) # noqa: E741 snake_case__ :List[str] = apply_sbox(__snake_case , temp[4:] ) snake_case__ :int = "0" * (2 - len(__snake_case )) + l # noqa: E741 snake_case__ :int = "0" * (2 - len(__snake_case )) + r snake_case__ :Optional[Any] = apply_table(l + r , __snake_case ) snake_case__ :Tuple = xor(__snake_case , __snake_case ) return temp + right if __name__ == "__main__": __UpperCAmelCase : Dict = input("Enter 10 bit key: ") __UpperCAmelCase : Tuple = input("Enter 8 bit message: ") __UpperCAmelCase : Any = [6, 3, 7, 4, 8, 5, 1_0, 9] __UpperCAmelCase : List[str] = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6] __UpperCAmelCase : Tuple = [2, 4, 3, 1] __UpperCAmelCase : List[Any] = [2, 6, 3, 1, 4, 8, 5, 7] __UpperCAmelCase : Optional[Any] = [4, 1, 3, 5, 7, 2, 8, 6] __UpperCAmelCase : Optional[int] = [4, 1, 2, 3, 2, 3, 4, 1] __UpperCAmelCase : List[Any] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]] __UpperCAmelCase : Union[str, Any] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]] # key generation __UpperCAmelCase : int = apply_table(key, paa_table) __UpperCAmelCase : Dict = temp[:5] __UpperCAmelCase : Optional[int] = temp[5:] __UpperCAmelCase : Optional[int] = left_shift(left) __UpperCAmelCase : Union[str, Any] = left_shift(right) __UpperCAmelCase : int = apply_table(left + right, pa_table) __UpperCAmelCase : Tuple = left_shift(left) __UpperCAmelCase : Union[str, Any] = left_shift(right) __UpperCAmelCase : Dict = left_shift(left) __UpperCAmelCase : Optional[Any] = left_shift(right) __UpperCAmelCase : Optional[int] = apply_table(left + right, pa_table) # encryption __UpperCAmelCase : Tuple = apply_table(message, IP) __UpperCAmelCase : Tuple = function(expansion, sa, sa, keya, temp) __UpperCAmelCase : List[Any] = temp[4:] + temp[:4] __UpperCAmelCase : int = function(expansion, sa, sa, keya, temp) __UpperCAmelCase : Union[str, Any] = apply_table(temp, IP_inv) print("Cipher text is:", CT) # decryption __UpperCAmelCase : List[Any] = apply_table(CT, IP) __UpperCAmelCase : List[Any] = function(expansion, sa, sa, keya, temp) __UpperCAmelCase : int = temp[4:] + temp[:4] __UpperCAmelCase : Union[str, Any] = function(expansion, sa, sa, keya, temp) __UpperCAmelCase : Union[str, Any] = apply_table(temp, IP_inv) print("Plain text after decypting is:", PT)
57
1
def lowercase_ ( __snake_case : int = 10_00 ) -> int: '''simple docstring''' snake_case__ :Union[str, Any] = 2**power snake_case__ :List[str] = 0 while n: snake_case__ , snake_case__ :Dict = r + n % 10, n // 10 return r if __name__ == "__main__": print(solution(int(str(input()).strip())))
57
import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class _snake_case ( _A , _A , _A ): @register_to_config def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = False ,) -> int: super().__init__() snake_case__ :Union[str, Any] = nn.Embedding(UpperCamelCase ,UpperCamelCase ) snake_case__ :int = nn.Embedding(UpperCamelCase ,UpperCamelCase ) snake_case__ :Any = False snake_case__ :List[Any] = nn.Dropout(p=UpperCamelCase ) snake_case__ :Tuple = TaConfig( vocab_size=UpperCamelCase ,d_model=UpperCamelCase ,num_heads=UpperCamelCase ,d_kv=UpperCamelCase ,d_ff=UpperCamelCase ,dropout_rate=UpperCamelCase ,feed_forward_proj=UpperCamelCase ,is_decoder=UpperCamelCase ,is_encoder_decoder=UpperCamelCase ,) snake_case__ :List[str] = nn.ModuleList() for lyr_num in range(UpperCamelCase ): snake_case__ :List[Any] = TaBlock(UpperCamelCase ) self.encoders.append(UpperCamelCase ) snake_case__ :Optional[Any] = TaLayerNorm(UpperCamelCase ) snake_case__ :Any = nn.Dropout(p=UpperCamelCase ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> int: snake_case__ :str = self.token_embedder(UpperCamelCase ) snake_case__ :int = encoder_input_tokens.shape[1] snake_case__ :List[Any] = torch.arange(UpperCamelCase ,device=encoder_input_tokens.device ) x += self.position_encoding(UpperCamelCase ) snake_case__ :Optional[int] = self.dropout_pre(UpperCamelCase ) # inverted the attention mask snake_case__ :Optional[Any] = encoder_input_tokens.size() snake_case__ :Dict = self.get_extended_attention_mask(UpperCamelCase ,UpperCamelCase ) for lyr in self.encoders: snake_case__ :str = lyr(UpperCamelCase ,UpperCamelCase )[0] snake_case__ :List[Any] = self.layer_norm(UpperCamelCase ) return self.dropout_post(UpperCamelCase ), encoder_inputs_mask
57
1
import inspect import os import unittest import torch import accelerate from accelerate import debug_launcher from accelerate.test_utils import ( execute_subprocess_async, require_cpu, require_huggingface_suite, require_multi_gpu, require_single_gpu, ) from accelerate.utils import patch_environment @require_huggingface_suite class _snake_case ( unittest.TestCase ): def lowerCAmelCase_ ( self ) -> Any: snake_case__ :Any = inspect.getfile(accelerate.test_utils ) snake_case__ :List[Any] = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ["scripts", "external_deps", "test_metrics.py"] ) from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401 snake_case__ :List[Any] = test_metrics @require_cpu def lowerCAmelCase_ ( self ) -> int: debug_launcher(self.test_metrics.main ,num_processes=1 ) @require_cpu def lowerCAmelCase_ ( self ) -> List[Any]: debug_launcher(self.test_metrics.main ) @require_single_gpu def lowerCAmelCase_ ( self ) -> Tuple: self.test_metrics.main() @require_multi_gpu def lowerCAmelCase_ ( self ) -> Tuple: print(f'Found {torch.cuda.device_count()} devices.' ) snake_case__ :Optional[int] = ["torchrun", f'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(UpperCamelCase ,env=os.environ.copy() )
57
__UpperCAmelCase : int = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []} __UpperCAmelCase : List[str] = ["a", "b", "c", "d", "e"] def lowercase_ ( __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : Tuple ) -> Optional[int]: '''simple docstring''' snake_case__ :List[Any] = start # add current to visited visited.append(__snake_case ) snake_case__ :List[str] = edges[current] for neighbor in neighbors: # if neighbor not in visited, visit if neighbor not in visited: snake_case__ :Any = topological_sort(__snake_case , __snake_case , __snake_case ) # if all neighbors visited add current to sort sort.append(__snake_case ) # if all vertices haven't been visited select a new one to visit if len(__snake_case ) != len(__snake_case ): for vertice in vertices: if vertice not in visited: snake_case__ :Any = topological_sort(__snake_case , __snake_case , __snake_case ) # return sort return sort if __name__ == "__main__": __UpperCAmelCase : Tuple = topological_sort("a", [], []) print(sort)
57
1
def lowercase_ ( ) -> List[str]: '''simple docstring''' snake_case__ :Optional[int] = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] snake_case__ :Dict = 6 snake_case__ :Tuple = 1 snake_case__ :Optional[int] = 19_01 snake_case__ :List[str] = 0 while year < 20_01: day += 7 if (year % 4 == 0 and year % 1_00 != 0) or (year % 4_00 == 0): if day > days_per_month[month - 1] and month != 2: month += 1 snake_case__ :Tuple = day - days_per_month[month - 2] elif day > 29 and month == 2: month += 1 snake_case__ :List[str] = day - 29 else: if day > days_per_month[month - 1]: month += 1 snake_case__ :List[Any] = day - days_per_month[month - 2] if month > 12: year += 1 snake_case__ :str = 1 if year < 20_01 and day == 1: sundays += 1 return sundays if __name__ == "__main__": print(solution())
57
import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class _snake_case ( unittest.TestCase ): def lowerCAmelCase_ ( self ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() def lowerCAmelCase_ ( self ) -> str: snake_case__ , snake_case__ :Tuple = FlaxControlNetModel.from_pretrained( "lllyasviel/sd-controlnet-canny" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa ) snake_case__ , snake_case__ :Any = FlaxStableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa ) snake_case__ :List[str] = controlnet_params snake_case__ :Union[str, Any] = "bird" snake_case__ :Optional[int] = jax.device_count() snake_case__ :Tuple = pipe.prepare_text_inputs([prompts] * num_samples ) snake_case__ :Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) snake_case__ :str = pipe.prepare_image_inputs([canny_image] * num_samples ) snake_case__ :List[str] = jax.random.PRNGKey(0 ) snake_case__ :str = jax.random.split(UpperCamelCase ,jax.device_count() ) snake_case__ :int = replicate(UpperCamelCase ) snake_case__ :Any = shard(UpperCamelCase ) snake_case__ :Any = shard(UpperCamelCase ) snake_case__ :str = pipe( prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) snake_case__ :List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) snake_case__ :Any = images[0, 253:256, 253:256, -1] snake_case__ :Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case__ :List[Any] = jnp.array( [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] ) print(f'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def lowerCAmelCase_ ( self ) -> Optional[int]: snake_case__ , snake_case__ :List[str] = FlaxControlNetModel.from_pretrained( "lllyasviel/sd-controlnet-openpose" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa ) snake_case__ , snake_case__ :Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa ) snake_case__ :str = controlnet_params snake_case__ :int = "Chef in the kitchen" snake_case__ :List[Any] = jax.device_count() snake_case__ :Dict = pipe.prepare_text_inputs([prompts] * num_samples ) snake_case__ :Any = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" ) snake_case__ :Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples ) snake_case__ :List[str] = jax.random.PRNGKey(0 ) snake_case__ :Any = jax.random.split(UpperCamelCase ,jax.device_count() ) snake_case__ :Dict = replicate(UpperCamelCase ) snake_case__ :Tuple = shard(UpperCamelCase ) snake_case__ :Optional[int] = shard(UpperCamelCase ) snake_case__ :Optional[Any] = pipe( prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) snake_case__ :int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) snake_case__ :List[str] = images[0, 253:256, 253:256, -1] snake_case__ :Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case__ :List[str] = jnp.array( [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] ) print(f'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
57
1
def lowercase_ ( __snake_case : int = 10_00 ) -> int: '''simple docstring''' snake_case__ :Union[str, Any] = 2**power snake_case__ :Any = str(__snake_case ) snake_case__ :Tuple = list(__snake_case ) snake_case__ :str = 0 for i in list_num: sum_of_num += int(__snake_case ) return sum_of_num if __name__ == "__main__": __UpperCAmelCase : Any = int(input("Enter the power of 2: ").strip()) print("2 ^ ", power, " = ", 2**power) __UpperCAmelCase : Tuple = solution(power) print("Sum of the digits is: ", result)
57
def lowercase_ ( __snake_case : list ) -> list: '''simple docstring''' if any(not isinstance(__snake_case , __snake_case ) or x < 0 for x in sequence ): raise TypeError("Sequence must be list of non-negative integers" ) for _ in range(len(__snake_case ) ): for i, (rod_upper, rod_lower) in enumerate(zip(__snake_case , sequence[1:] ) ): if rod_upper > rod_lower: sequence[i] -= rod_upper - rod_lower sequence[i + 1] += rod_upper - rod_lower return sequence if __name__ == "__main__": assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
57
1
from __future__ import annotations import time import numpy as np __UpperCAmelCase : Dict = [8, 5, 9, 7] __UpperCAmelCase : List[Any] = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] __UpperCAmelCase : Any = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class _snake_case : def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,) -> None: snake_case__ :Optional[int] = claim_vector snake_case__ :Any = allocated_resources_table snake_case__ :Dict = maximum_claim_table def lowerCAmelCase_ ( self ) -> list[int]: return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def lowerCAmelCase_ ( self ) -> list[int]: return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def lowerCAmelCase_ ( self ) -> list[list[int]]: return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(UpperCamelCase ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def lowerCAmelCase_ ( self ) -> dict[int, list[int]]: return {self.__need().index(UpperCamelCase ): i for i in self.__need()} def lowerCAmelCase_ ( self ,**UpperCamelCase ) -> None: snake_case__ :Optional[Any] = self.__need() snake_case__ :Any = self.__allocated_resources_table snake_case__ :Optional[int] = self.__available_resources() snake_case__ :Dict = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print("_" * 50 + "\n" ) while need_list: snake_case__ :int = False for each_need in need_list: snake_case__ :int = True for index, need in enumerate(UpperCamelCase ): if need > available_resources[index]: snake_case__ :Tuple = False break if execution: snake_case__ :List[Any] = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: snake_case__ :Any = original_need_index print(f'Process {process_number + 1} is executing.' ) # remove the process run from stack need_list.remove(UpperCamelCase ) # update available/freed resources stack snake_case__ :List[str] = np.array(UpperCamelCase ) + np.array( alloc_resources_table[process_number] ) print( "Updated available resource stack for processes: " + " ".join([str(UpperCamelCase ) for x in available_resources] ) ) break if safe: print("The process is in a safe state.\n" ) else: print("System in unsafe state. Aborting...\n" ) break def lowerCAmelCase_ ( self ) -> List[Any]: print(" " * 9 + "Allocated Resource Table" ) for item in self.__allocated_resources_table: print( f'P{self.__allocated_resources_table.index(UpperCamelCase ) + 1}' + " ".join(f'{it:>8}' for it in item ) + "\n" ) print(" " * 9 + "System Resource Table" ) for item in self.__maximum_claim_table: print( f'P{self.__maximum_claim_table.index(UpperCamelCase ) + 1}' + " ".join(f'{it:>8}' for it in item ) + "\n" ) print( "Current Usage by Active Processes: " + " ".join(str(UpperCamelCase ) for x in self.__claim_vector ) ) print( "Initial Available Resources: " + " ".join(str(UpperCamelCase ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
57
from __future__ import annotations def lowercase_ ( __snake_case : list ) -> float: '''simple docstring''' if not nums: raise ValueError("List is empty" ) return sum(__snake_case ) / len(__snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
57
1
import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def lowercase_ ( __snake_case : Optional[int] , __snake_case : List[str]=10 ) -> Optional[Any]: '''simple docstring''' snake_case__ :Any = [] for _ in range(__snake_case ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def lowercase_ ( __snake_case : str , __snake_case : Union[str, Any]=10 ) -> str: '''simple docstring''' snake_case__ :List[Any] = [] for step in range(__snake_case ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: snake_case__ :Union[str, Any] = os.path.join(__snake_case , "schedule.bin" ) torch.save(scheduler.state_dict() , __snake_case ) snake_case__ :List[Any] = torch.load(__snake_case ) scheduler.load_state_dict(__snake_case ) return lrs @require_torch class _snake_case ( unittest.TestCase ): def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> List[str]: self.assertEqual(len(UpperCamelCase ) ,len(UpperCamelCase ) ) for a, b in zip(UpperCamelCase ,UpperCamelCase ): self.assertAlmostEqual(UpperCamelCase ,UpperCamelCase ,delta=UpperCamelCase ) def lowerCAmelCase_ ( self ) -> str: snake_case__ :Union[str, Any] = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=UpperCamelCase ) snake_case__ :List[Any] = torch.tensor([0.4, 0.2, -0.5] ) snake_case__ :int = nn.MSELoss() # No warmup, constant schedule, no gradient clipping snake_case__ :List[str] = AdamW(params=[w] ,lr=2E-1 ,weight_decay=0.0 ) for _ in range(100 ): snake_case__ :str = criterion(UpperCamelCase ,UpperCamelCase ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 ) def lowerCAmelCase_ ( self ) -> int: snake_case__ :int = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=UpperCamelCase ) snake_case__ :Optional[Any] = torch.tensor([0.4, 0.2, -0.5] ) snake_case__ :Optional[int] = nn.MSELoss() # No warmup, constant schedule, no gradient clipping snake_case__ :Any = Adafactor( params=[w] ,lr=1E-2 ,eps=(1E-30, 1E-3) ,clip_threshold=1.0 ,decay_rate=-0.8 ,betaa=UpperCamelCase ,weight_decay=0.0 ,relative_step=UpperCamelCase ,scale_parameter=UpperCamelCase ,warmup_init=UpperCamelCase ,) for _ in range(1_000 ): snake_case__ :Any = criterion(UpperCamelCase ,UpperCamelCase ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 ) @require_torch class _snake_case ( unittest.TestCase ): _A = nn.Linear(50 , 50 ) if is_torch_available() else None _A = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None _A = 10 def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ) -> int: self.assertEqual(len(UpperCamelCase ) ,len(UpperCamelCase ) ) for a, b in zip(UpperCamelCase ,UpperCamelCase ): self.assertAlmostEqual(UpperCamelCase ,UpperCamelCase ,delta=UpperCamelCase ,msg=UpperCamelCase ) def lowerCAmelCase_ ( self ) -> Optional[Any]: snake_case__ :Dict = {"num_warmup_steps": 2, "num_training_steps": 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) snake_case__ :Dict = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {"num_warmup_steps": 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, "num_cycles": 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, "power": 2.0, "lr_end": 1E-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {"num_warmup_steps": 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): snake_case__ , snake_case__ :List[Any] = data snake_case__ :List[str] = scheduler_func(self.optimizer ,**UpperCamelCase ) self.assertEqual(len([scheduler.get_lr()[0]] ) ,1 ) snake_case__ :int = unwrap_schedule(UpperCamelCase ,self.num_steps ) self.assertListAlmostEqual( UpperCamelCase ,UpperCamelCase ,tol=1E-2 ,msg=f'failed for {scheduler_func} in normal scheduler' ,) snake_case__ :str = scheduler_func(self.optimizer ,**UpperCamelCase ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(UpperCamelCase ) # wrap to test picklability of the schedule snake_case__ :Dict = unwrap_and_save_reload_schedule(UpperCamelCase ,self.num_steps ) self.assertListEqual(UpperCamelCase ,UpperCamelCase ,msg=f'failed for {scheduler_func} in save and reload' ) class _snake_case : def __init__( self ,UpperCamelCase ) -> List[str]: snake_case__ :Any = fn def __call__( self ,*UpperCamelCase ,**UpperCamelCase ) -> Dict: return self.fn(*UpperCamelCase ,**UpperCamelCase ) @classmethod def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Any: snake_case__ :Dict = list(map(self ,scheduler.lr_lambdas ) )
57
from __future__ import annotations import math def lowercase_ ( __snake_case : int , __snake_case : int , __snake_case : bool , __snake_case : list[int] , __snake_case : float ) -> int: '''simple docstring''' if depth < 0: raise ValueError("Depth cannot be less than 0" ) if len(__snake_case ) == 0: raise ValueError("Scores cannot be empty" ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) , ) return min( minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) , ) def lowercase_ ( ) -> None: '''simple docstring''' snake_case__ :List[Any] = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23] snake_case__ :int = math.log(len(__snake_case ) , 2 ) print("Optimal value : " , end="" ) print(minimax(0 , 0 , __snake_case , __snake_case , __snake_case ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
57
1
import argparse import math import traceback import dateutil.parser as date_parser import requests def lowercase_ ( __snake_case : List[str] ) -> str: '''simple docstring''' snake_case__ :str = {} snake_case__ :Optional[int] = job["started_at"] snake_case__ :int = job["completed_at"] snake_case__ :Optional[Any] = date_parser.parse(SCREAMING_SNAKE_CASE_ ) snake_case__ :List[str] = date_parser.parse(SCREAMING_SNAKE_CASE_ ) snake_case__ :str = round((end_datetime - start_datetime).total_seconds() / 6_0.0 ) snake_case__ :List[str] = start snake_case__ :List[Any] = end snake_case__ :str = duration_in_min return job_info def lowercase_ ( __snake_case : Any , __snake_case : List[Any]=None ) -> str: '''simple docstring''' snake_case__ :List[Any] = None if token is not None: snake_case__ :Any = {"Accept": "application/vnd.github+json", "Authorization": F'Bearer {token}'} snake_case__ :List[Any] = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100' snake_case__ :str = requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ ).json() snake_case__ :str = {} try: job_time.update({job["name"]: extract_time_from_single_job(SCREAMING_SNAKE_CASE_ ) for job in result["jobs"]} ) snake_case__ :Optional[Any] = math.ceil((result["total_count"] - 1_00) / 1_00 ) for i in range(SCREAMING_SNAKE_CASE_ ): snake_case__ :List[str] = requests.get(url + F'&page={i + 2}' , headers=SCREAMING_SNAKE_CASE_ ).json() job_time.update({job["name"]: extract_time_from_single_job(SCREAMING_SNAKE_CASE_ ) for job in result["jobs"]} ) return job_time except Exception: print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}' ) return {} if __name__ == "__main__": __UpperCAmelCase : int = argparse.ArgumentParser() # Required parameters parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.") __UpperCAmelCase : int = parser.parse_args() __UpperCAmelCase : Optional[int] = get_job_time(args.workflow_run_id) __UpperCAmelCase : Optional[Any] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) for k, v in job_time.items(): print(F'''{k}: {v['duration']}''')
700
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __UpperCAmelCase : Optional[Any] = logging.get_logger(__name__) def lowercase_ ( __snake_case : Any , __snake_case : Any ) -> Any: '''simple docstring''' snake_case__ :Optional[Any] = b.T snake_case__ :Optional[Any] = np.sum(np.square(__snake_case ) , axis=1 ) snake_case__ :Tuple = np.sum(np.square(__snake_case ) , axis=0 ) snake_case__ :Union[str, Any] = np.matmul(__snake_case , __snake_case ) snake_case__ :Union[str, Any] = aa[:, None] - 2 * ab + ba[None, :] return d def lowercase_ ( __snake_case : Optional[Any] , __snake_case : int ) -> Any: '''simple docstring''' snake_case__ :Optional[Any] = x.reshape(-1 , 3 ) snake_case__ :List[str] = squared_euclidean_distance(__snake_case , __snake_case ) return np.argmin(__snake_case , axis=1 ) class _snake_case ( _A ): _A = ['pixel_values'] def __init__( self ,UpperCamelCase = None ,UpperCamelCase = True ,UpperCamelCase = None ,UpperCamelCase = PILImageResampling.BILINEAR ,UpperCamelCase = True ,UpperCamelCase = True ,**UpperCamelCase ,) -> None: super().__init__(**UpperCamelCase ) snake_case__ :List[Any] = size if size is not None else {"height": 256, "width": 256} snake_case__ :str = get_size_dict(UpperCamelCase ) snake_case__ :Dict = np.array(UpperCamelCase ) if clusters is not None else None snake_case__ :str = do_resize snake_case__ :List[str] = size snake_case__ :List[Any] = resample snake_case__ :Union[str, Any] = do_normalize snake_case__ :int = do_color_quantize def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = PILImageResampling.BILINEAR ,UpperCamelCase = None ,**UpperCamelCase ,) -> np.ndarray: snake_case__ :List[str] = get_size_dict(UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}' ) return resize( UpperCamelCase ,size=(size["height"], size["width"]) ,resample=UpperCamelCase ,data_format=UpperCamelCase ,**UpperCamelCase ) def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,) -> np.ndarray: snake_case__ :Tuple = rescale(image=UpperCamelCase ,scale=1 / 127.5 ,data_format=UpperCamelCase ) snake_case__ :List[Any] = image - 1 return image def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = ChannelDimension.FIRST ,**UpperCamelCase ,) -> PIL.Image.Image: snake_case__ :Optional[int] = do_resize if do_resize is not None else self.do_resize snake_case__ :int = size if size is not None else self.size snake_case__ :Tuple = get_size_dict(UpperCamelCase ) snake_case__ :str = resample if resample is not None else self.resample snake_case__ :Dict = do_normalize if do_normalize is not None else self.do_normalize snake_case__ :Tuple = do_color_quantize if do_color_quantize is not None else self.do_color_quantize snake_case__ :List[Any] = clusters if clusters is not None else self.clusters snake_case__ :str = np.array(UpperCamelCase ) snake_case__ :int = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_color_quantize and clusters is None: raise ValueError("Clusters must be specified if do_color_quantize is True." ) # All transformations expect numpy arrays. snake_case__ :Union[str, Any] = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: snake_case__ :int = [self.resize(image=UpperCamelCase ,size=UpperCamelCase ,resample=UpperCamelCase ) for image in images] if do_normalize: snake_case__ :Any = [self.normalize(image=UpperCamelCase ) for image in images] if do_color_quantize: snake_case__ :Optional[Any] = [to_channel_dimension_format(UpperCamelCase ,ChannelDimension.LAST ) for image in images] # color quantize from (batch_size, height, width, 3) to (batch_size, height, width) snake_case__ :Union[str, Any] = np.array(UpperCamelCase ) snake_case__ :Optional[int] = color_quantize(UpperCamelCase ,UpperCamelCase ).reshape(images.shape[:-1] ) # flatten to (batch_size, height*width) snake_case__ :List[Any] = images.shape[0] snake_case__ :str = images.reshape(UpperCamelCase ,-1 ) # We need to convert back to a list of images to keep consistent behaviour across processors. snake_case__ :Any = list(UpperCamelCase ) else: snake_case__ :List[str] = [to_channel_dimension_format(UpperCamelCase ,UpperCamelCase ) for image in images] snake_case__ :List[str] = {"input_ids": images} return BatchFeature(data=UpperCamelCase ,tensor_type=UpperCamelCase )
57
0